1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "X86ISelLowering.h"
15 #include "Utils/X86ShuffleDecode.h"
16 #include "X86CallingConv.h"
17 #include "X86FrameLowering.h"
18 #include "X86InstrBuilder.h"
19 #include "X86IntrinsicsInfo.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/CodeGen/IntrinsicLowering.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/TargetLowering.h"
37 #include "llvm/CodeGen/WinEHFuncInfo.h"
38 #include "llvm/IR/CallSite.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/GlobalAlias.h"
45 #include "llvm/IR/GlobalVariable.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/MC/MCAsmInfo.h"
49 #include "llvm/MC/MCContext.h"
50 #include "llvm/MC/MCExpr.h"
51 #include "llvm/MC/MCSymbol.h"
52 #include "llvm/Support/CommandLine.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/KnownBits.h"
56 #include "llvm/Support/MathExtras.h"
57 #include "llvm/Target/TargetOptions.h"
64 #define DEBUG_TYPE "x86-isel"
66 STATISTIC(NumTailCalls, "Number of tail calls");
68 static cl::opt<bool> ExperimentalVectorWideningLegalization(
69 "x86-experimental-vector-widening-legalization", cl::init(false),
70 cl::desc("Enable an experimental vector type legalization through widening "
71 "rather than promotion."),
74 static cl::opt<int> ExperimentalPrefLoopAlignment(
75 "x86-experimental-pref-loop-alignment", cl::init(4),
76 cl::desc("Sets the preferable loop alignment for experiments "
77 "(the last x86-experimental-pref-loop-alignment bits"
78 " of the loop header PC will be 0)."),
81 static cl::opt<bool> MulConstantOptimization(
82 "mul-constant-optimization", cl::init(true),
83 cl::desc("Replace 'mul x, Const' with more effective instructions like "
/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
98 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
99 const X86Subtarget &STI)
100 : TargetLowering(TM), Subtarget(STI) {
101 bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
102 X86ScalarSSEf64 = Subtarget.hasSSE2();
103 X86ScalarSSEf32 = Subtarget.hasSSE1();
104 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
106 // Set up the TargetLowering object.
108 // X86 is weird. It always uses i8 for shift amounts and setcc results.
109 setBooleanContents(ZeroOrOneBooleanContent);
110 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
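  // SSE/AVX compare instructions (PCMPEQ*/PCMPGT*, CMPPS/CMPPD) set each
  // result lane to all-ones or all-zeros, which is why vector booleans are
  // 0/-1 while scalar setcc results are a 0/1 byte in a GPR.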
111 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
113 // For 64-bit, since we have so many registers, use the ILP scheduler.
114 // For 32-bit, use the register pressure specific scheduling.
115 // For Atom, always use ILP scheduling.
116 if (Subtarget.isAtom())
117 setSchedulingPreference(Sched::ILP);
118 else if (Subtarget.is64Bit())
119 setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
122 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
123 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
125 // Bypass expensive divides and use cheaper ones.
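  // addBypassSlowDiv(N, M) makes the slow-division bypass in CodeGenPrepare
  // emit a run-time check and use an M-bit divide whenever both operands of an
  // N-bit divide actually fit in M bits (a common case for small counters).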
126 if (TM.getOptLevel() >= CodeGenOpt::Default) {
127 if (Subtarget.hasSlowDivide32())
128 addBypassSlowDiv(32, 8);
129 if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
130 addBypassSlowDiv(64, 32);
133 if (Subtarget.isTargetWindowsMSVC() ||
134 Subtarget.isTargetWindowsItanium()) {
135 // Setup Windows compiler runtime calls.
136 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
137 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
138 setLibcallName(RTLIB::SREM_I64, "_allrem");
139 setLibcallName(RTLIB::UREM_I64, "_aullrem");
140 setLibcallName(RTLIB::MUL_I64, "_allmul");
141 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
142 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
143 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
144 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
145 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
148 if (Subtarget.isTargetDarwin()) {
149 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
150 setUseUnderscoreSetJmp(false);
151 setUseUnderscoreLongJmp(false);
152 } else if (Subtarget.isTargetWindowsGNU()) {
153 // MS runtime is weird: it exports _setjmp, but longjmp!
154 setUseUnderscoreSetJmp(true);
155 setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so the AtomicExpandPass will expand it so we don't need
  // cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024 bits.
165 if (!Subtarget.hasCmpxchg8b())
166 setMaxAtomicSizeInBitsSupported(32);
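  // Anything wider than the supported maximum is rewritten by AtomicExpandPass
  // into __atomic_* library calls instead of a cmpxchg loop.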
168 // Set up the register classes.
169 addRegisterClass(MVT::i8, &X86::GR8RegClass);
170 addRegisterClass(MVT::i16, &X86::GR16RegClass);
171 addRegisterClass(MVT::i32, &X86::GR32RegClass);
172 if (Subtarget.is64Bit())
173 addRegisterClass(MVT::i64, &X86::GR64RegClass);
175 for (MVT VT : MVT::integer_valuetypes())
176 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
178 // We don't accept any truncstore of integer registers.
179 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
180 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
181 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
182 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
183 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
184 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
186 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
188 // SETOEQ and SETUNE require checking two conditions.
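  // After UCOMISS/UCOMISD (or FUCOMI), ZF reports "equal" and PF reports
  // "unordered"; no single x86 condition code tests both flags, so
  // ordered-equal and unordered-or-not-equal each need two flag tests.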
189 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
190 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
191 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
192 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
193 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
194 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
197 if (Subtarget.hasCMov()) {
198 setOperationAction(ISD::ABS , MVT::i16 , Custom);
199 setOperationAction(ISD::ABS , MVT::i32 , Custom);
201 setOperationAction(ISD::ABS , MVT::i64 , Custom);
204 for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
205 setOperationAction(ShiftOp , MVT::i16 , Custom);
206 setOperationAction(ShiftOp , MVT::i32 , Custom);
207 if (Subtarget.is64Bit())
208 setOperationAction(ShiftOp , MVT::i64 , Custom);
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
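  // Promotion is safe here because the zero-extended i8/i16 value is always
  // non-negative in the wider type, so the signed conversion gives the same
  // result.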
213 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
214 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
215 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
217 if (Subtarget.is64Bit()) {
218 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
219 // f32/f64 are legal, f80 is custom.
220 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
    else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
223 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
224 } else if (!Subtarget.useSoftFloat()) {
225 // We have an algorithm for SSE2->double, and we turn this into a
226 // 64-bit FILD followed by conditional FADD for other targets.
227 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
228 // We have an algorithm for SSE2, and we turn this into a 64-bit
229 // FILD or VCVTUSI2SS/SD for other targets.
230 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
232 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
237 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
238 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
240 if (!Subtarget.useSoftFloat()) {
241 // SSE has no i16 to fp conversion, only i32.
242 if (X86ScalarSSEf32) {
243 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
244 // f32 and f64 cases are Legal, f80 case is not
245 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
247 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
248 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
251 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
252 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Expand);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
257 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
258 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
260 if (!Subtarget.useSoftFloat()) {
261 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
262 // are Legal, f80 is custom lowered.
263 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
264 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
266 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
267 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
269 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
270 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Expand);
271 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Expand);
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
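  // An unsigned i1/i8/i16 result always fits in a signed i32, so converting
  // via the wider signed type and truncating is exact.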
276 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
277 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
278 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
280 if (Subtarget.is64Bit()) {
281 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
282 // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
283 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
284 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
286 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
287 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
289 } else if (!Subtarget.useSoftFloat()) {
290 // Since AVX is a superset of SSE3, only check for SSE here.
291 if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
292 // Expand FP_TO_UINT into a select.
293 // FIXME: We would like to use a Custom expander here eventually to do
294 // the optimal thing for SSE vs. the default expansion in the legalizer.
295 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
297 // With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
298 // With SSE3 we can use fisttpll to convert to a signed i64; without
299 // SSE, we're stuck with a fistpll.
300 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
302 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
305 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
306 if (!X86ScalarSSEf64) {
307 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
308 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
309 if (Subtarget.is64Bit()) {
310 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
311 // Without SSE, i64->f64 goes through memory.
312 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
314 } else if (!Subtarget.is64Bit())
315 setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
317 // Scalar integer divide and remainder are lowered to use operations that
318 // produce two results, to match the available instructions. This exposes
319 // the two-result form to trivial CSE, which is able to combine x/y and x%y
320 // into a single instruction.
322 // Scalar integer multiply-high is also lowered to use two-result
323 // operations, to match the available instructions. However, plain multiply
324 // (low) operations are left as Legal, as there are single-result
325 // instructions for this in x86. Using the two-result multiply instructions
326 // when both high and low results are needed must be arranged by dagcombine.
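  // Expanding these lets the legalizer form ISD::SDIVREM/UDIVREM, which map
  // onto the x86 DIV/IDIV instructions that produce the quotient in rAX and
  // the remainder in rDX.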
327 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
328 setOperationAction(ISD::MULHS, VT, Expand);
329 setOperationAction(ISD::MULHU, VT, Expand);
330 setOperationAction(ISD::SDIV, VT, Expand);
331 setOperationAction(ISD::UDIV, VT, Expand);
332 setOperationAction(ISD::SREM, VT, Expand);
333 setOperationAction(ISD::UREM, VT, Expand);
336 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
337 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
338 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
339 MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
340 setOperationAction(ISD::BR_CC, VT, Expand);
341 setOperationAction(ISD::SELECT_CC, VT, Expand);
343 if (Subtarget.is64Bit())
344 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
345 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
346 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
347 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
348 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
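  // FREM is not lowered to x87 FPREM; expanding it produces calls to
  // fmodf/fmod/fmodl instead.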
350 setOperationAction(ISD::FREM , MVT::f32 , Expand);
351 setOperationAction(ISD::FREM , MVT::f64 , Expand);
352 setOperationAction(ISD::FREM , MVT::f80 , Expand);
353 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
357 setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
358 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
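  // Without BMI there is no TZCNT: BSF leaves its destination undefined when
  // the source is zero, so plain CTTZ gets custom lowering (BSF plus a CMOV
  // for the zero case) while CTTZ_ZERO_UNDEF can use BSF directly.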
359 if (!Subtarget.hasBMI()) {
360 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
361 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
362 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
363 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
364 if (Subtarget.is64Bit()) {
365 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
366 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
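  // CTLZ is similar: LZCNT is fully defined for a zero input, but BSR returns
  // the index of the highest set bit and is undefined for zero, so the custom
  // lowering computes (bitwidth - 1) ^ BSR and patches up the zero case.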
370 if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
373 setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
374 setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
376 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
377 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
378 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
379 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
380 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
381 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
382 if (Subtarget.is64Bit()) {
383 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
384 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
388 // Special handling for half-precision floating point conversions.
389 // If we don't have F16C support, then lower half float conversions
390 // into library calls.
391 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
392 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
393 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
396 // There's never any support for operations beyond MVT::f32.
397 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
398 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
399 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
400 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
402 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
403 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
404 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
405 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
406 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
407 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
409 if (Subtarget.hasPOPCNT()) {
410 setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
412 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
413 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
414 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
415 if (Subtarget.is64Bit())
416 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
418 setOperationAction(ISD::CTPOP , MVT::i64 , Custom);
421 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
423 if (!Subtarget.hasMOVBE())
424 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
426 // These should be promoted to a larger select which is supported.
427 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
428 // X86 wants to expand cmov itself.
429 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
430 setOperationAction(ISD::SELECT, VT, Custom);
431 setOperationAction(ISD::SETCC, VT, Custom);
433 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
436 setOperationAction(ISD::SELECT, VT, Custom);
437 setOperationAction(ISD::SETCC, VT, Custom);
440 // Custom action for SELECT MMX and expand action for SELECT_CC MMX
441 setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
442 setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
444 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
445 // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
446 // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
447 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
448 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
449 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
450 if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
451 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
454 for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
457 setOperationAction(ISD::ConstantPool , VT, Custom);
458 setOperationAction(ISD::JumpTable , VT, Custom);
459 setOperationAction(ISD::GlobalAddress , VT, Custom);
460 setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
461 setOperationAction(ISD::ExternalSymbol , VT, Custom);
462 setOperationAction(ISD::BlockAddress , VT, Custom);
465 // 64-bit shl, sra, srl (iff 32-bit x86)
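  // The *_PARTS nodes split a double-register shift into SHLD/SHRD plus a
  // fix-up (a test of the high bit of the count and a conditional move/swap)
  // for shift amounts of 32 or more.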
466 for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
469 setOperationAction(ISD::SHL_PARTS, VT, Custom);
470 setOperationAction(ISD::SRA_PARTS, VT, Custom);
471 setOperationAction(ISD::SRL_PARTS, VT, Custom);
474 if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
475 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
477 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
479 // Expand certain atomics
480 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
481 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
482 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
483 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
484 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
485 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
486 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
487 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
490 if (!Subtarget.is64Bit())
491 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
493 if (Subtarget.hasCmpxchg16b()) {
494 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
497 // FIXME - use subtarget debug flags
498 if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
499 !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
500 TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
501 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
504 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
505 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
507 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
508 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
510 setOperationAction(ISD::TRAP, MVT::Other, Legal);
511 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
513 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
514 setOperationAction(ISD::VASTART , MVT::Other, Custom);
515 setOperationAction(ISD::VAEND , MVT::Other, Expand);
516 bool Is64Bit = Subtarget.is64Bit();
517 setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
518 setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
520 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
521 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
523 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
525 // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
526 setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
527 setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
529 if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
530 // f32 and f64 use SSE.
531 // Set up the FP register classes.
532 addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
533 : &X86::FR32RegClass);
534 addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
535 : &X86::FR64RegClass);
537 // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
541 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
543 for (auto VT : { MVT::f32, MVT::f64 }) {
544 // Use ANDPD to simulate FABS.
545 setOperationAction(ISD::FABS, VT, Custom);
547 // Use XORP to simulate FNEG.
548 setOperationAction(ISD::FNEG, VT, Custom);
550 // Use ANDPD and ORPD to simulate FCOPYSIGN.
551 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
553 // These might be better off as horizontal vector ops.
554 setOperationAction(ISD::FADD, VT, Custom);
555 setOperationAction(ISD::FSUB, VT, Custom);
557 // We don't support sin/cos/fmod
558 setOperationAction(ISD::FSIN , VT, Expand);
559 setOperationAction(ISD::FCOS , VT, Expand);
560 setOperationAction(ISD::FSINCOS, VT, Expand);
563 // Lower this to MOVMSK plus an AND.
564 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
565 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
568 // Use SSE for f32, x87 for f64.
569 // Set up the FP register classes.
570 addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);
574 // Use ANDPS to simulate FABS.
575 setOperationAction(ISD::FABS , MVT::f32, Custom);
577 // Use XORP to simulate FNEG.
578 setOperationAction(ISD::FNEG , MVT::f32, Custom);
581 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
583 // Use ANDPS and ORPS to simulate FCOPYSIGN.
585 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
586 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
588 // We don't support sin/cos/fmod
589 setOperationAction(ISD::FSIN , MVT::f32, Expand);
590 setOperationAction(ISD::FCOS , MVT::f32, Expand);
591 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
594 // Always expand sin/cos functions even though x87 has an instruction.
595 setOperationAction(ISD::FSIN, MVT::f64, Expand);
596 setOperationAction(ISD::FCOS, MVT::f64, Expand);
597 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
600 // f32 and f64 in x87.
601 // Set up the FP register classes.
602 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
603 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
605 for (auto VT : { MVT::f32, MVT::f64 }) {
606 setOperationAction(ISD::UNDEF, VT, Expand);
607 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
609 // Always expand sin/cos functions even though x87 has an instruction.
610 setOperationAction(ISD::FSIN , VT, Expand);
611 setOperationAction(ISD::FCOS , VT, Expand);
612 setOperationAction(ISD::FSINCOS, VT, Expand);
616 // Expand FP32 immediates into loads from the stack, save special cases.
617 if (isTypeLegal(MVT::f32)) {
618 if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
619 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
620 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
621 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
622 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
623 } else // SSE immediates.
624 addLegalFPImmediate(APFloat(+0.0f)); // xorps
626 // Expand FP64 immediates into loads from the stack, save special cases.
627 if (isTypeLegal(MVT::f64)) {
628 if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
629 addLegalFPImmediate(APFloat(+0.0)); // FLD0
630 addLegalFPImmediate(APFloat(+1.0)); // FLD1
631 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
632 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
633 } else // SSE immediates.
634 addLegalFPImmediate(APFloat(+0.0)); // xorpd
637 // We don't support FMA.
638 setOperationAction(ISD::FMA, MVT::f64, Expand);
639 setOperationAction(ISD::FMA, MVT::f32, Expand);
641 // Long double always uses X87, except f128 in MMX.
643 if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
644 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
645 : &X86::VR128RegClass);
646 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
647 setOperationAction(ISD::FABS , MVT::f128, Custom);
648 setOperationAction(ISD::FNEG , MVT::f128, Custom);
649 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
652 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
653 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
654 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
656 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
657 addLegalFPImmediate(TmpFlt); // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    bool ignored;
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                    &ignored);
    addLegalFPImmediate(TmpFlt2);  // FLD1
666 TmpFlt2.changeSign();
667 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
670 // Always expand sin/cos functions even though x87 has an instruction.
671 setOperationAction(ISD::FSIN , MVT::f80, Expand);
672 setOperationAction(ISD::FCOS , MVT::f80, Expand);
673 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
675 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
676 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
677 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
678 setOperationAction(ISD::FRINT, MVT::f80, Expand);
679 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
680 setOperationAction(ISD::FMA, MVT::f80, Expand);
681 setOperationAction(ISD::LROUND, MVT::f80, Expand);
682 setOperationAction(ISD::LLROUND, MVT::f80, Expand);
683 setOperationAction(ISD::LRINT, MVT::f80, Expand);
684 setOperationAction(ISD::LLRINT, MVT::f80, Expand);
687 // Always use a library call for pow.
688 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
689 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
690 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
692 setOperationAction(ISD::FLOG, MVT::f80, Expand);
693 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
694 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
695 setOperationAction(ISD::FEXP, MVT::f80, Expand);
696 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
697 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
698 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
700 // Some FP actions are always expanded for vector types.
701 for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
702 MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
703 setOperationAction(ISD::FSIN, VT, Expand);
704 setOperationAction(ISD::FSINCOS, VT, Expand);
705 setOperationAction(ISD::FCOS, VT, Expand);
706 setOperationAction(ISD::FREM, VT, Expand);
707 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
708 setOperationAction(ISD::FPOW, VT, Expand);
709 setOperationAction(ISD::FLOG, VT, Expand);
710 setOperationAction(ISD::FLOG2, VT, Expand);
711 setOperationAction(ISD::FLOG10, VT, Expand);
712 setOperationAction(ISD::FEXP, VT, Expand);
713 setOperationAction(ISD::FEXP2, VT, Expand);
716 // First set operation action for all vector types to either promote
717 // (for widening) or expand (for scalarization). Then we will selectively
718 // turn on ones that can be effectively codegen'd.
719 for (MVT VT : MVT::vector_valuetypes()) {
720 setOperationAction(ISD::SDIV, VT, Expand);
721 setOperationAction(ISD::UDIV, VT, Expand);
722 setOperationAction(ISD::SREM, VT, Expand);
723 setOperationAction(ISD::UREM, VT, Expand);
724 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
725 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
726 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
727 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
728 setOperationAction(ISD::FMA, VT, Expand);
729 setOperationAction(ISD::FFLOOR, VT, Expand);
730 setOperationAction(ISD::FCEIL, VT, Expand);
731 setOperationAction(ISD::FTRUNC, VT, Expand);
732 setOperationAction(ISD::FRINT, VT, Expand);
733 setOperationAction(ISD::FNEARBYINT, VT, Expand);
734 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
735 setOperationAction(ISD::MULHS, VT, Expand);
736 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
737 setOperationAction(ISD::MULHU, VT, Expand);
738 setOperationAction(ISD::SDIVREM, VT, Expand);
739 setOperationAction(ISD::UDIVREM, VT, Expand);
740 setOperationAction(ISD::CTPOP, VT, Expand);
741 setOperationAction(ISD::CTTZ, VT, Expand);
742 setOperationAction(ISD::CTLZ, VT, Expand);
743 setOperationAction(ISD::ROTL, VT, Expand);
744 setOperationAction(ISD::ROTR, VT, Expand);
745 setOperationAction(ISD::BSWAP, VT, Expand);
746 setOperationAction(ISD::SETCC, VT, Expand);
747 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
748 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
749 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
750 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
751 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
752 setOperationAction(ISD::TRUNCATE, VT, Expand);
753 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
754 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
755 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
756 setOperationAction(ISD::SELECT_CC, VT, Expand);
757 for (MVT InnerVT : MVT::vector_valuetypes()) {
758 setTruncStoreAction(InnerVT, VT, Expand);
760 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
761 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
763 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
764 // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
767 if (VT.getVectorElementType() == MVT::i1)
768 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
770 // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
771 // split/scalarized right now.
772 if (VT.getVectorElementType() == MVT::f16)
773 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
777 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
778 // with -msoft-float, disable use of MMX as well.
779 if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
780 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
781 // No operations on x86mmx supported, everything uses intrinsics.
784 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
785 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
786 : &X86::VR128RegClass);
788 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
789 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
790 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
791 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
792 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
793 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
794 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
795 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
796 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
798 setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
799 setOperationAction(ISD::STORE, MVT::v2f32, Custom);
802 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
803 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
804 : &X86::VR128RegClass);
806 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
807 // registers cannot be used even for integer operations.
808 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
809 : &X86::VR128RegClass);
810 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
811 : &X86::VR128RegClass);
812 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
813 : &X86::VR128RegClass);
814 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
815 : &X86::VR128RegClass);
817 for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
818 MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
819 setOperationAction(ISD::SDIV, VT, Custom);
820 setOperationAction(ISD::SREM, VT, Custom);
821 setOperationAction(ISD::UDIV, VT, Custom);
822 setOperationAction(ISD::UREM, VT, Custom);
825 setOperationAction(ISD::MUL, MVT::v2i8, Custom);
826 setOperationAction(ISD::MUL, MVT::v2i16, Custom);
827 setOperationAction(ISD::MUL, MVT::v2i32, Custom);
828 setOperationAction(ISD::MUL, MVT::v4i8, Custom);
829 setOperationAction(ISD::MUL, MVT::v4i16, Custom);
830 setOperationAction(ISD::MUL, MVT::v8i8, Custom);
832 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
833 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
834 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
835 setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
836 setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
837 setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
838 setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
839 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
840 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
841 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
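    // Of the multiplies above, only v8i16 has native low/high instructions
    // (PMULLW/PMULHW/PMULHUW); the i8/i32/i64 cases are emulated with unpacks,
    // PMULUDQ and shuffles in the custom lowering.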
842 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
843 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
844 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
846 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
847 setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
848 setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
849 setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
850 setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
853 setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
854 setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
855 setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
856 setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
857 setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
858 setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
859 setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
860 setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
861 setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
862 setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
863 setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
864 setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
866 if (!ExperimentalVectorWideningLegalization) {
867 // Use widening instead of promotion.
868 for (auto VT : { MVT::v8i8, MVT::v4i8, MVT::v2i8,
869 MVT::v4i16, MVT::v2i16 }) {
870 setOperationAction(ISD::UADDSAT, VT, Custom);
871 setOperationAction(ISD::SADDSAT, VT, Custom);
872 setOperationAction(ISD::USUBSAT, VT, Custom);
873 setOperationAction(ISD::SSUBSAT, VT, Custom);
877 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
878 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
879 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
881 // Provide custom widening for v2f32 setcc. This is really for VLX when
882 // setcc result type returns v2i1/v4i1 vector for v2f32/v4f32 leading to
883 // type legalization changing the result type to v4i1 during widening.
    // It works fine for SSE2 and is probably faster so no need to qualify with
    // subtarget.
886 setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
888 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
889 setOperationAction(ISD::SETCC, VT, Custom);
890 setOperationAction(ISD::CTPOP, VT, Custom);
891 setOperationAction(ISD::ABS, VT, Custom);
893 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
894 // setcc all the way to isel and prefer SETGT in some isel patterns.
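      // SSE/AVX integer vector compares only provide equal and signed
      // greater-than, so LT/LE are handled by swapping the operands of a GT
      // compare or inverting its result.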
895 setCondCodeAction(ISD::SETLT, VT, Custom);
896 setCondCodeAction(ISD::SETLE, VT, Custom);
899 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
900 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
901 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
902 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
903 setOperationAction(ISD::VSELECT, VT, Custom);
904 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
907 // We support custom legalizing of sext and anyext loads for specific
908 // memory vector types which we can load as a scalar (or sequence of
909 // scalars) and extend in-register to a legal 128-bit vector type. For sext
910 // loads these must work with a single scalar load.
911 for (MVT VT : MVT::integer_vector_valuetypes()) {
912 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
913 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
914 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
915 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
916 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
917 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
920 for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
921 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
922 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
923 setOperationAction(ISD::VSELECT, VT, Custom);
      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;
928 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
929 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
932 // Custom lower v2i64 and v2f64 selects.
933 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
934 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
935 setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
936 setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
937 setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
939 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
940 setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
941 setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
943 // Custom legalize these to avoid over promotion or custom promotion.
944 setOperationAction(ISD::FP_TO_SINT, MVT::v2i8, Custom);
945 setOperationAction(ISD::FP_TO_SINT, MVT::v4i8, Custom);
946 setOperationAction(ISD::FP_TO_SINT, MVT::v8i8, Custom);
947 setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
948 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
949 setOperationAction(ISD::FP_TO_UINT, MVT::v2i8, Custom);
950 setOperationAction(ISD::FP_TO_UINT, MVT::v4i8, Custom);
951 setOperationAction(ISD::FP_TO_UINT, MVT::v8i8, Custom);
952 setOperationAction(ISD::FP_TO_UINT, MVT::v2i16, Custom);
953 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    // By marking FP_TO_SINT v8i16 as Custom, we trick type legalization into
956 // promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
957 // split again based on the input type, this will cause an AssertSExt i16 to
958 // be emitted instead of an AssertZExt. This will allow packssdw followed by
959 // packuswb to be used to truncate to v8i8. This is necessary since packusdw
960 // isn't available until sse4.1.
961 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
963 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
964 setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
966 setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
968 // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
969 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
971 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
972 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
974 // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // storing.
977 setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
978 setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
979 setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
980 setOperationAction(ISD::STORE, MVT::v2i32, Custom);
981 setOperationAction(ISD::STORE, MVT::v4i16, Custom);
982 setOperationAction(ISD::STORE, MVT::v8i8, Custom);
984 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
985 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
986 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
987 if (!Subtarget.hasAVX512())
988 setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
990 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
991 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
992 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
994 if (ExperimentalVectorWideningLegalization) {
995 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
997 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
998 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
999 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
1000 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
1001 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
1002 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
1004 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
1007 // In the customized shift lowering, the legal v4i32/v2i64 cases
1008 // in AVX2 will be recognized.
1009 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1010 setOperationAction(ISD::SRL, VT, Custom);
1011 setOperationAction(ISD::SHL, VT, Custom);
1012 setOperationAction(ISD::SRA, VT, Custom);
1015 setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
1016 setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
1018 // With AVX512, expanding (and promoting the shifts) is better.
1019 if (!Subtarget.hasAVX512())
1020 setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
1023 if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1024 setOperationAction(ISD::ABS, MVT::v16i8, Legal);
1025 setOperationAction(ISD::ABS, MVT::v8i16, Legal);
1026 setOperationAction(ISD::ABS, MVT::v4i32, Legal);
1027 setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
1028 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1029 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1030 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1031 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1033 // These might be better off as horizontal vector ops.
1034 setOperationAction(ISD::ADD, MVT::i16, Custom);
1035 setOperationAction(ISD::ADD, MVT::i32, Custom);
1036 setOperationAction(ISD::SUB, MVT::i16, Custom);
1037 setOperationAction(ISD::SUB, MVT::i32, Custom);
1040 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1041 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1042 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
1043 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
1044 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
1045 setOperationAction(ISD::FRINT, RoundedTy, Legal);
1046 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
1049 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
1050 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
1051 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
1052 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
1053 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
1054 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
1055 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
1056 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
1058 // FIXME: Do we need to handle scalar-to-vector here?
1059 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1061 // We directly match byte blends in the backend as they match the VSELECT
1063 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1065 // SSE41 brings specific instructions for doing vector sign extend even in
1066 // cases where we don't have SRA.
1067 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1068 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1069 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1072 if (!ExperimentalVectorWideningLegalization) {
1073 // Avoid narrow result types when widening. The legal types are listed
1074 // in the next loop.
1075 for (MVT VT : MVT::integer_vector_valuetypes()) {
1076 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
1077 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
1078 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
1082 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1083 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1084 setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
1085 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
1086 if (!ExperimentalVectorWideningLegalization)
1087 setLoadExtAction(LoadExtOp, MVT::v2i32, MVT::v2i8, Legal);
1088 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
1089 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1090 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1091 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1094 // i8 vectors are custom because the source register and source
    // memory operand types are not the same width.
1096 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1099 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1100 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1101 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1102 setOperationAction(ISD::ROTL, VT, Custom);
1104 // XOP can efficiently perform BITREVERSE with VPPERM.
1105 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1106 setOperationAction(ISD::BITREVERSE, VT, Custom);
1108 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1109 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1110 setOperationAction(ISD::BITREVERSE, VT, Custom);
1113 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1114 bool HasInt256 = Subtarget.hasInt256();
1116 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1117 : &X86::VR256RegClass);
1118 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1119 : &X86::VR256RegClass);
1120 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1121 : &X86::VR256RegClass);
1122 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1123 : &X86::VR256RegClass);
1124 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1125 : &X86::VR256RegClass);
1126 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1127 : &X86::VR256RegClass);
1129 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1130 setOperationAction(ISD::FFLOOR, VT, Legal);
1131 setOperationAction(ISD::FCEIL, VT, Legal);
1132 setOperationAction(ISD::FTRUNC, VT, Legal);
1133 setOperationAction(ISD::FRINT, VT, Legal);
1134 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1135 setOperationAction(ISD::FNEG, VT, Custom);
1136 setOperationAction(ISD::FABS, VT, Custom);
1137 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1140 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1141 // even though v8i16 is a legal type.
1142 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1143 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1144 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1146 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1148 if (!Subtarget.hasAVX512())
1149 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1151 // In the customized shift lowering, the legal v8i32/v4i64 cases
1152 // in AVX2 will be recognized.
1153 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1154 setOperationAction(ISD::SRL, VT, Custom);
1155 setOperationAction(ISD::SHL, VT, Custom);
1156 setOperationAction(ISD::SRA, VT, Custom);
1159 // These types need custom splitting if their input is a 128-bit vector.
1160 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1161 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1162 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1163 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1165 setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
1166 setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
    // With BWI, expanding (and promoting the shifts) is better.
1169 if (!Subtarget.hasBWI())
1170 setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
1172 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1173 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1174 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1175 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1176 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1177 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1179 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1180 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1181 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1182 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1185 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1186 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1187 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1188 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1190 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1191 setOperationAction(ISD::SETCC, VT, Custom);
1192 setOperationAction(ISD::CTPOP, VT, Custom);
1193 setOperationAction(ISD::CTLZ, VT, Custom);
1195 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1196 // setcc all the way to isel and prefer SETGT in some isel patterns.
1197 setCondCodeAction(ISD::SETLT, VT, Custom);
1198 setCondCodeAction(ISD::SETLE, VT, Custom);
1201 if (Subtarget.hasAnyFMA()) {
1202 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1203 MVT::v2f64, MVT::v4f64 })
1204 setOperationAction(ISD::FMA, VT, Legal);
1207 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1208 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1209 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1212 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1213 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1214 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1215 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1217 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1218 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1219 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1220 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1221 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1222 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1224 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1225 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1226 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1227 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1228 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1230 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1231 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1232 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1233 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1234 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1235 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1236 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1237 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1239 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1240 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1241 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1242 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1243 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1244 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1247 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1248 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1249 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1253 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1254 // when we have a 256bit-wide blend with immediate.
1255 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1257 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1258 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1259 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1260 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1261 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1262 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1263 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1264 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1268 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1269 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1270 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1271 setOperationAction(ISD::MSTORE, VT, Legal);
1274 // Extract subvector is special because the value type
1275 // (result) is 128-bit but the source is 256-bit wide.
1276 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1277 MVT::v4f32, MVT::v2f64 }) {
1278 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1281 // Custom lower several nodes for 256-bit types.
1282 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1283 MVT::v8f32, MVT::v4f64 }) {
1284 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1285 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1286 setOperationAction(ISD::VSELECT, VT, Custom);
1287 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1288 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1289 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1290 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1291 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1292 setOperationAction(ISD::STORE, VT, Custom);
1296 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // Custom legalize 2x32 to get a little better code.
1300 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1301 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1303 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1304 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1305 setOperationAction(ISD::MGATHER, VT, Custom);
1309 // This block controls legalization of the mask vector sizes that are
1310 // available with AVX512. 512-bit vectors are in a separate block controlled
1311 // by useAVX512Regs.
1312 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1313 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1314 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1315 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1316 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1317 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1319 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1320 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1321 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1323 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1324 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1325 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1326 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1327 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1328 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1330 // There is no byte sized k-register load or store without AVX512DQ.
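    // Without DQI there is no KMOVB, so, roughly speaking, the custom lowering
    // for these narrow mask loads/stores widens the mask (or round-trips
    // through a GPR) and uses KMOVW instead.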
1331 if (!Subtarget.hasDQI()) {
1332 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1333 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1334 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1335 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1337 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1338 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1339 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1340 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
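// Without DQI there is no KMOVB, so these narrow mask loads/stores cannot be
// a single byte-sized k-register move; the custom lowering is assumed to go
// through a wider k-move and/or a GPR (e.g. KMOVW plus a byte-sized scalar
// access).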
1343 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1344 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1345 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1346 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1347 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1350 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1351 setOperationAction(ISD::ADD, VT, Custom);
1352 setOperationAction(ISD::SUB, VT, Custom);
1353 setOperationAction(ISD::MUL, VT, Custom);
1354 setOperationAction(ISD::SETCC, VT, Custom);
1355 setOperationAction(ISD::SELECT, VT, Custom);
1356 setOperationAction(ISD::TRUNCATE, VT, Custom);
1357 setOperationAction(ISD::UADDSAT, VT, Custom);
1358 setOperationAction(ISD::SADDSAT, VT, Custom);
1359 setOperationAction(ISD::USUBSAT, VT, Custom);
1360 setOperationAction(ISD::SSUBSAT, VT, Custom);
1362 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1363 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1364 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1365 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1366 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1367 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1368 setOperationAction(ISD::VSELECT, VT, Expand);
1371 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1372 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1375 // This block controls legalization for 512-bit operations with 32/64 bit
1376 // elements. 512-bits can be disabled based on prefer-vector-width and
1377 // required-vector-width function attributes.
1378 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1379 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1380 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1381 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1382 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1384 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1385 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1386 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1387 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1388 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1389 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1392 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1393 setOperationAction(ISD::FNEG, VT, Custom);
1394 setOperationAction(ISD::FABS, VT, Custom);
1395 setOperationAction(ISD::FMA, VT, Legal);
1396 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1399 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1400 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
1401 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
1402 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
1403 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1404 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
1405 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
1406 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
1407 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1408 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1410 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1411 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1412 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1413 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1414 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
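// These truncating stores correspond to the AVX512 down-converting moves,
// e.g. a v16i32 -> v16i8 truncating store can be selected to a single
// "vpmovdb %zmm0, (%rdi)" and a v8i64 -> v8i8 one to "vpmovqb %zmm0, (%rdi)".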
1416 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1417 // to 512-bit rather than use the AVX2 instructions so that we can use
1418 // k-registers for masking.
1419 if (!Subtarget.hasVLX()) {
1420 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1421 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1422 setOperationAction(ISD::MLOAD, VT, Custom);
1423 setOperationAction(ISD::MSTORE, VT, Custom);
1427 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1428 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1429 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1430 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1431 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1432 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1433 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1434 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1436 if (ExperimentalVectorWideningLegalization) {
1437 // Need to custom widen this if we don't have AVX512BW.
1438 setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom);
1439 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom);
1440 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom);
1443 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1444 setOperationAction(ISD::FFLOOR, VT, Legal);
1445 setOperationAction(ISD::FCEIL, VT, Legal);
1446 setOperationAction(ISD::FTRUNC, VT, Legal);
1447 setOperationAction(ISD::FRINT, VT, Legal);
1448 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1450 setOperationAction(ISD::SELECT, VT, Custom);
1453 // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1454 for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1455 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1456 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1459 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1460 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1461 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1462 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1464 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1465 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1467 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1468 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1470 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1471 setOperationAction(ISD::SMAX, VT, Legal);
1472 setOperationAction(ISD::UMAX, VT, Legal);
1473 setOperationAction(ISD::SMIN, VT, Legal);
1474 setOperationAction(ISD::UMIN, VT, Legal);
1475 setOperationAction(ISD::ABS, VT, Legal);
1476 setOperationAction(ISD::SRL, VT, Custom);
1477 setOperationAction(ISD::SHL, VT, Custom);
1478 setOperationAction(ISD::SRA, VT, Custom);
1479 setOperationAction(ISD::CTPOP, VT, Custom);
1480 setOperationAction(ISD::ROTL, VT, Custom);
1481 setOperationAction(ISD::ROTR, VT, Custom);
1482 setOperationAction(ISD::SETCC, VT, Custom);
1483 setOperationAction(ISD::SELECT, VT, Custom);
1485 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1486 // setcc all the way to isel and prefer SETGT in some isel patterns.
1487 setCondCodeAction(ISD::SETLT, VT, Custom);
1488 setCondCodeAction(ISD::SETLE, VT, Custom);
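// A sketch of the intent: marking LT/LE as Custom lets lowering swap the
// compare operands and emit the GT/GE form instead, matching the
// SETGT-preferring isel patterns mentioned above.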
1491 if (Subtarget.hasDQI()) {
1492 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1493 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1494 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1495 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1497 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
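// AVX512DQ adds VPMULLQ ("vpmullq %zmm1, %zmm0, %zmm0"), so a v8i64 multiply
// becomes a single instruction here; without DQI it stays Custom above and is
// presumably built out of 32-bit multiply pieces.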
1500 if (Subtarget.hasCDI()) {
1501 // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version.
1502 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1503 setOperationAction(ISD::CTLZ, VT, Legal);
1505 } // Subtarget.hasCDI()
1507 if (Subtarget.hasVPOPCNTDQ()) {
1508 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1509 setOperationAction(ISD::CTPOP, VT, Legal);
1512 // Extract subvector is special because the value type
1513 // (result) is 256-bit but the source is 512-bit wide.
1514 // 128-bit was made Legal under AVX1.
1515 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1516 MVT::v8f32, MVT::v4f64 })
1517 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1519 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1520 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1521 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1522 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1523 setOperationAction(ISD::VSELECT, VT, Custom);
1524 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1525 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1526 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1527 setOperationAction(ISD::MLOAD, VT, Legal);
1528 setOperationAction(ISD::MSTORE, VT, Legal);
1529 setOperationAction(ISD::MGATHER, VT, Custom);
1530 setOperationAction(ISD::MSCATTER, VT, Custom);
1532 // Need to custom split v32i16/v64i8 bitcasts.
1533 if (!Subtarget.hasBWI()) {
1534 setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1535 setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
1538 if (Subtarget.hasVBMI2()) {
1539 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1540 setOperationAction(ISD::FSHL, VT, Custom);
1541 setOperationAction(ISD::FSHR, VT, Custom);
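// VBMI2 provides the VPSHLD/VPSHRD (concatenate-and-shift) family, including
// the variable-count VPSHLDV/VPSHRDV forms; the Custom action is assumed to
// let lowering pick between the immediate and variable encodings.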
1546 // This block controls legalization for operations that don't have
1547 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1548 // 128/256-bit types.
1549 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1550 // These operations are handled on non-VLX by artificially widening in
1551 // isel patterns.
1552 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1554 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1555 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1556 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1557 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1558 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1560 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1561 setOperationAction(ISD::SMAX, VT, Legal);
1562 setOperationAction(ISD::UMAX, VT, Legal);
1563 setOperationAction(ISD::SMIN, VT, Legal);
1564 setOperationAction(ISD::UMIN, VT, Legal);
1565 setOperationAction(ISD::ABS, VT, Legal);
1568 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1569 setOperationAction(ISD::ROTL, VT, Custom);
1570 setOperationAction(ISD::ROTR, VT, Custom);
1573 // Custom legalize 2x32 to get a little better code.
1574 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1575 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1577 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1578 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1579 setOperationAction(ISD::MSCATTER, VT, Custom);
1581 if (Subtarget.hasDQI()) {
1582 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1583 setOperationAction(ISD::SINT_TO_FP, VT, Legal);
1584 setOperationAction(ISD::UINT_TO_FP, VT, Legal);
1585 setOperationAction(ISD::FP_TO_SINT, VT, Legal);
1586 setOperationAction(ISD::FP_TO_UINT, VT, Legal);
1588 setOperationAction(ISD::MUL, VT, Legal);
1592 if (Subtarget.hasCDI()) {
1593 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1594 setOperationAction(ISD::CTLZ, VT, Legal);
1596 } // Subtarget.hasCDI()
1598 if (Subtarget.hasVPOPCNTDQ()) {
1599 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1600 setOperationAction(ISD::CTPOP, VT, Legal);
1604 // This block controls legalization of v32i1/v64i1 which are available with
1605 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1606 // useBWIRegs.
1607 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1608 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1609 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1611 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1612 setOperationAction(ISD::ADD, VT, Custom);
1613 setOperationAction(ISD::SUB, VT, Custom);
1614 setOperationAction(ISD::MUL, VT, Custom);
1615 setOperationAction(ISD::VSELECT, VT, Expand);
1616 setOperationAction(ISD::UADDSAT, VT, Custom);
1617 setOperationAction(ISD::SADDSAT, VT, Custom);
1618 setOperationAction(ISD::USUBSAT, VT, Custom);
1619 setOperationAction(ISD::SSUBSAT, VT, Custom);
1621 setOperationAction(ISD::TRUNCATE, VT, Custom);
1622 setOperationAction(ISD::SETCC, VT, Custom);
1623 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1624 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1625 setOperationAction(ISD::SELECT, VT, Custom);
1626 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1627 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1630 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1631 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1632 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1633 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1634 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1635 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1637 // Extends from v32i1 masks to 256-bit vectors.
1638 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1639 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1640 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
1643 // This block controls legalization for v32i16 and v64i8. 512-bits can be
1644 // disabled based on prefer-vector-width and required-vector-width function
1645 // attributes.
1646 if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1647 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1648 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1650 // Extends from v64i1 masks to 512-bit vectors.
1651 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1652 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1653 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1655 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1656 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1657 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1658 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1659 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1660 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1661 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1662 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1663 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
1664 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
1665 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1666 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1667 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1668 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1669 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1670 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1671 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1672 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1673 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1674 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1675 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1676 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1677 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1679 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1680 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1682 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
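// With BWI a v32i16 -> v32i8 truncating store can be selected to a single
// down-convert, roughly "vpmovwb %zmm0, (%rdi)".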
1684 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1685 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1686 setOperationAction(ISD::VSELECT, VT, Custom);
1687 setOperationAction(ISD::ABS, VT, Legal);
1688 setOperationAction(ISD::SRL, VT, Custom);
1689 setOperationAction(ISD::SHL, VT, Custom);
1690 setOperationAction(ISD::SRA, VT, Custom);
1691 setOperationAction(ISD::MLOAD, VT, Legal);
1692 setOperationAction(ISD::MSTORE, VT, Legal);
1693 setOperationAction(ISD::CTPOP, VT, Custom);
1694 setOperationAction(ISD::CTLZ, VT, Custom);
1695 setOperationAction(ISD::SMAX, VT, Legal);
1696 setOperationAction(ISD::UMAX, VT, Legal);
1697 setOperationAction(ISD::SMIN, VT, Legal);
1698 setOperationAction(ISD::UMIN, VT, Legal);
1699 setOperationAction(ISD::SETCC, VT, Custom);
1700 setOperationAction(ISD::UADDSAT, VT, Legal);
1701 setOperationAction(ISD::SADDSAT, VT, Legal);
1702 setOperationAction(ISD::USUBSAT, VT, Legal);
1703 setOperationAction(ISD::SSUBSAT, VT, Legal);
1704 setOperationAction(ISD::SELECT, VT, Custom);
1706 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1707 // setcc all the way to isel and prefer SETGT in some isel patterns.
1708 setCondCodeAction(ISD::SETLT, VT, Custom);
1709 setCondCodeAction(ISD::SETLE, VT, Custom);
1712 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1713 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1716 if (Subtarget.hasBITALG()) {
1717 for (auto VT : { MVT::v64i8, MVT::v32i16 })
1718 setOperationAction(ISD::CTPOP, VT, Legal);
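// BITALG adds VPOPCNTB/VPOPCNTW, so byte/word element population counts are
// single instructions here instead of the Custom expansion used above.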
1721 if (Subtarget.hasVBMI2()) {
1722 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1723 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1727 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1728 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1729 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1730 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1733 // These operations are handled on non-VLX by artificially widening in
1734 // isel patterns.
1735 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1737 if (Subtarget.hasBITALG()) {
1738 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1739 setOperationAction(ISD::CTPOP, VT, Legal);
1743 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1744 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1745 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1746 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1747 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1748 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1750 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1751 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1752 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1753 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1754 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1756 if (Subtarget.hasDQI()) {
1757 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1758 // v2f32 UINT_TO_FP is already custom under SSE2.
1759 setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
1760 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1761 "Unexpected operation action!");
1762 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1763 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1764 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1767 if (Subtarget.hasBWI()) {
1768 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1769 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1772 if (Subtarget.hasVBMI2()) {
1773 // TODO: Make these legal even without VLX?
1774 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1775 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1776 setOperationAction(ISD::FSHL, VT, Custom);
1777 setOperationAction(ISD::FSHR, VT, Custom);
1782 // We want to custom lower some of our intrinsics.
1783 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1784 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1785 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1786 if (!Subtarget.is64Bit()) {
1787 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1790 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1791 // handle type legalization for these operations here.
1793 // FIXME: We really should do custom legalization for addition and
1794 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1795 // than generic legalization for 64-bit multiplication-with-overflow, though.
1796 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1797 if (VT == MVT::i64 && !Subtarget.is64Bit())
1798 continue;
1799 // Add/Sub/Mul with overflow operations are custom lowered.
1800 setOperationAction(ISD::SADDO, VT, Custom);
1801 setOperationAction(ISD::UADDO, VT, Custom);
1802 setOperationAction(ISD::SSUBO, VT, Custom);
1803 setOperationAction(ISD::USUBO, VT, Custom);
1804 setOperationAction(ISD::SMULO, VT, Custom);
1805 setOperationAction(ISD::UMULO, VT, Custom);
1807 // Support carry in as value rather than glue.
1808 setOperationAction(ISD::ADDCARRY, VT, Custom);
1809 setOperationAction(ISD::SUBCARRY, VT, Custom);
1810 setOperationAction(ISD::SETCCCARRY, VT, Custom);
1813 if (!Subtarget.is64Bit()) {
1814 // These libcalls are not available in 32-bit.
1815 setLibcallName(RTLIB::SHL_I128, nullptr);
1816 setLibcallName(RTLIB::SRL_I128, nullptr);
1817 setLibcallName(RTLIB::SRA_I128, nullptr);
1818 setLibcallName(RTLIB::MUL_I128, nullptr);
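// Clearing a libcall name makes integer legalization expand the operation
// inline instead of emitting a call, e.g. a 128-bit shift is built from
// 64-bit shift pieces rather than calling __ashlti3.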
1821 // Combine sin / cos into _sincos_stret if it is available.
1822 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1823 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1824 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1825 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1828 if (Subtarget.isTargetWin64()) {
1829 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1830 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1831 setOperationAction(ISD::SREM, MVT::i128, Custom);
1832 setOperationAction(ISD::UREM, MVT::i128, Custom);
1833 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1834 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1837 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1838 // is. We should promote the value to 64-bits to solve this.
1839 // This is what the CRT headers do - `fmodf` is an inline header
1840 // function casting to f64 and calling `fmod`.
1841 if (Subtarget.is32Bit() &&
1842 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1843 for (ISD::NodeType Op :
1844 {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
1845 ISD::FLOG10, ISD::FPOW, ISD::FSIN})
1846 if (isOperationExpand(Op, MVT::f32))
1847 setOperationAction(Op, MVT::f32, Promote);
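// Roughly, after this promotion an f32 frem is computed as
//   (float)fmod((double)x, (double)y)
// i.e. the operands are extended to f64, the f64 libcall is used, and the
// result is truncated back to f32 (mirroring what the CRT headers do for
// fmodf).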
1849 // We have target-specific dag combine patterns for the following nodes:
1850 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1851 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1852 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1853 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1854 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1855 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1856 setTargetDAGCombine(ISD::BITCAST);
1857 setTargetDAGCombine(ISD::VSELECT);
1858 setTargetDAGCombine(ISD::SELECT);
1859 setTargetDAGCombine(ISD::SHL);
1860 setTargetDAGCombine(ISD::SRA);
1861 setTargetDAGCombine(ISD::SRL);
1862 setTargetDAGCombine(ISD::OR);
1863 setTargetDAGCombine(ISD::AND);
1864 setTargetDAGCombine(ISD::ADD);
1865 setTargetDAGCombine(ISD::FADD);
1866 setTargetDAGCombine(ISD::FSUB);
1867 setTargetDAGCombine(ISD::FNEG);
1868 setTargetDAGCombine(ISD::FMA);
1869 setTargetDAGCombine(ISD::FMINNUM);
1870 setTargetDAGCombine(ISD::FMAXNUM);
1871 setTargetDAGCombine(ISD::SUB);
1872 setTargetDAGCombine(ISD::LOAD);
1873 setTargetDAGCombine(ISD::MLOAD);
1874 setTargetDAGCombine(ISD::STORE);
1875 setTargetDAGCombine(ISD::MSTORE);
1876 setTargetDAGCombine(ISD::TRUNCATE);
1877 setTargetDAGCombine(ISD::ZERO_EXTEND);
1878 setTargetDAGCombine(ISD::ANY_EXTEND);
1879 setTargetDAGCombine(ISD::SIGN_EXTEND);
1880 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1881 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
1882 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1883 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1884 setTargetDAGCombine(ISD::SINT_TO_FP);
1885 setTargetDAGCombine(ISD::UINT_TO_FP);
1886 setTargetDAGCombine(ISD::SETCC);
1887 setTargetDAGCombine(ISD::MUL);
1888 setTargetDAGCombine(ISD::XOR);
1889 setTargetDAGCombine(ISD::MSCATTER);
1890 setTargetDAGCombine(ISD::MGATHER);
1892 computeRegisterProperties(Subtarget.getRegisterInfo());
1894 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1895 MaxStoresPerMemsetOptSize = 8;
1896 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1897 MaxStoresPerMemcpyOptSize = 4;
1898 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1899 MaxStoresPerMemmoveOptSize = 4;
1901 // TODO: These control memcmp expansion in CGP and could be raised higher, but
1902 // that needs to be benchmarked and balanced with the potential use of vector
1903 // load/store types (PR33329, PR33914).
1904 MaxLoadsPerMemcmp = 2;
1905 MaxLoadsPerMemcmpOptSize = 2;
1907 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
1908 setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
1910 // An out-of-order CPU can speculatively execute past a predictable branch,
1911 // but a conditional move could be stalled by an expensive earlier operation.
1912 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
1913 EnableExtLdPromotion = true;
1914 setPrefFunctionAlignment(4); // 2^4 bytes.
1916 verifyIntrinsicTables();
1919 // This has so far only been implemented for 64-bit MachO.
1920 bool X86TargetLowering::useLoadStackGuardNode() const {
1921 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
1924 bool X86TargetLowering::useStackGuardXorFP() const {
1925 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
1926 return Subtarget.getTargetTriple().isOSMSVCRT();
1929 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
1930 const SDLoc &DL) const {
1931 EVT PtrTy = getPointerTy(DAG.getDataLayout());
1932 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
1933 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
1934 return SDValue(Node, 0);
1937 TargetLoweringBase::LegalizeTypeAction
1938 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
1939 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1940 return TypeSplitVector;
1942 if (ExperimentalVectorWideningLegalization &&
1943 VT.getVectorNumElements() != 1 &&
1944 VT.getVectorElementType() != MVT::i1)
1945 return TypeWidenVector;
1947 return TargetLoweringBase::getPreferredVectorAction(VT);
1950 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1953 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1955 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1958 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1961 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1963 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1966 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
1967 LLVMContext& Context,
1972 if (Subtarget.hasAVX512()) {
1973 const unsigned NumElts = VT.getVectorNumElements();
1975 // Figure out what this type will be legalized to.
1976 EVT LegalVT = VT;
1977 while (getTypeAction(Context, LegalVT) != TypeLegal)
1978 LegalVT = getTypeToTransformTo(Context, LegalVT);
1980 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
1981 if (LegalVT.getSimpleVT().is512BitVector())
1982 return EVT::getVectorVT(Context, MVT::i1, NumElts);
1984 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
1985 // If we legalized to less than a 512-bit vector, then we will use a vXi1
1986 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
1987 // vXi16/vXi8.
1988 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
1989 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
1990 return EVT::getVectorVT(Context, MVT::i1, NumElts);
1994 return VT.changeVectorElementTypeToInteger();
1997 /// Helper for getByValTypeAlignment to determine
1998 /// the desired ByVal argument alignment.
1999 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
2002 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2003 if (VTy->getBitWidth() == 128)
2005 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2006 unsigned EltAlign = 0;
2007 getMaxByValAlign(ATy->getElementType(), EltAlign);
2008 if (EltAlign > MaxAlign)
2009 MaxAlign = EltAlign;
2010 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2011 for (auto *EltTy : STy->elements()) {
2012 unsigned EltAlign = 0;
2013 getMaxByValAlign(EltTy, EltAlign);
2014 if (EltAlign > MaxAlign)
2015 MaxAlign = EltAlign;
2022 /// Return the desired alignment for ByVal aggregate
2023 /// function arguments in the caller parameter area. For X86, aggregates
2024 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2025 /// are at 4-byte boundaries.
2026 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2027 const DataLayout &DL) const {
2028 if (Subtarget.is64Bit()) {
2029 // Max of 8 and alignment of type.
2030 unsigned TyAlign = DL.getABITypeAlignment(Ty);
2037 if (Subtarget.hasSSE1())
2038 getMaxByValAlign(Ty, Align);
2042 /// Returns the target specific optimal type for load
2043 /// and store operations as a result of memset, memcpy, and memmove
2044 /// lowering. If DstAlign is zero, the destination alignment is
2045 /// unconstrained and can satisfy any requirement. Similarly, if SrcAlign is
2046 /// zero there is no need to check it against an alignment requirement,
2047 /// probably because the source does not need to be loaded. If 'IsMemset' is
2048 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2049 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2050 /// source is constant so it does not need to be loaded.
2051 /// It returns EVT::Other if the type should be determined using generic
2052 /// target-independent logic.
2053 /// For vector ops we check that the overall size isn't larger than our
2054 /// preferred vector width.
2055 EVT X86TargetLowering::getOptimalMemOpType(
2056 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2057 bool ZeroMemset, bool MemcpyStrSrc,
2058 const AttributeList &FuncAttributes) const {
2059 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2060 if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2061 ((DstAlign == 0 || DstAlign >= 16) &&
2062 (SrcAlign == 0 || SrcAlign >= 16)))) {
2063 // FIXME: Check if unaligned 32-byte accesses are slow.
2064 if (Size >= 32 && Subtarget.hasAVX() &&
2065 (Subtarget.getPreferVectorWidth() >= 256)) {
2066 // Although this isn't a well-supported type for AVX1, we'll let
2067 // legalization and shuffle lowering produce the optimal codegen. If we
2068 // choose an optimal type with a vector element larger than a byte,
2069 // getMemsetStores() may create an intermediate splat (using an integer
2070 // multiply) before we splat as a vector.
2073 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2075 // TODO: Can SSE1 handle a byte vector?
2076 // If we have SSE1 registers we should be able to use them.
2077 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2078 (Subtarget.getPreferVectorWidth() >= 128))
2080 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2081 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2082 // Do not use f64 to lower memcpy if source is string constant. It's
2083 // better to use i32 to avoid the loads.
2084 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2085 // The gymnastics of splatting a byte value into an XMM register and then
2086 // only using 8-byte stores (because this is a CPU with slow unaligned
2087 // 16-byte accesses) makes that a loser.
2091 // This is a compromise. If we reach here, unaligned accesses may be slow on
2092 // this target. However, creating smaller, aligned accesses could be even
2093 // slower and would certainly be a lot more code.
2094 if (Subtarget.is64Bit() && Size >= 8)
2099 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2101 return X86ScalarSSEf32;
2102 else if (VT == MVT::f64)
2103 return X86ScalarSSEf64;
2107 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2108 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2111 switch (VT.getSizeInBits()) {
2113 // 8-byte and under are always assumed to be fast.
2117 *Fast = !Subtarget.isUnalignedMem16Slow();
2120 *Fast = !Subtarget.isUnalignedMem32Slow();
2122 // TODO: What about AVX-512 (512-bit) accesses?
2125 // NonTemporal vector memory ops must be aligned.
2126 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2127 // NT loads can only be vector aligned, so if it's less aligned than the
2128 // minimum vector size (which we can split the vector down to), we might as
2129 // well use a regular unaligned vector load.
2130 // We don't have any NT loads pre-SSE41.
2131 if (!!(Flags & MachineMemOperand::MOLoad))
2132 return (Align < 16 || !Subtarget.hasSSE41());
2135 // Misaligned accesses of any size are always allowed.
2139 /// Return the entry encoding for a jump table in the
2140 /// current function. The returned value is a member of the
2141 /// MachineJumpTableInfo::JTEntryKind enum.
2142 unsigned X86TargetLowering::getJumpTableEncoding() const {
2143 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2144 // symbol.
2145 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2146 return MachineJumpTableInfo::EK_Custom32;
2148 // Otherwise, use the normal jump table encoding heuristics.
2149 return TargetLowering::getJumpTableEncoding();
2152 bool X86TargetLowering::useSoftFloat() const {
2153 return Subtarget.useSoftFloat();
2156 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2157 ArgListTy &Args) const {
2159 // Only relabel X86-32 for C / Stdcall CCs.
2160 if (Subtarget.is64Bit())
2162 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2164 unsigned ParamRegs = 0;
2165 if (auto *M = MF->getFunction().getParent())
2166 ParamRegs = M->getNumberRegisterParameters();
2168 // Mark the first N integer arguments as being passed in registers.
2169 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2170 Type *T = Args[Idx].Ty;
2171 if (T->isIntOrPtrTy())
2172 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2173 unsigned numRegs = 1;
2174 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2176 if (ParamRegs < numRegs)
2178 ParamRegs -= numRegs;
2179 Args[Idx].IsInReg = true;
2185 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2186 const MachineBasicBlock *MBB,
2187 unsigned uid,MCContext &Ctx) const{
2188 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2189 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2190 // entries.
2191 return MCSymbolRefExpr::create(MBB->getSymbol(),
2192 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2195 /// Returns relocation base for the given PIC jumptable.
2196 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2197 SelectionDAG &DAG) const {
2198 if (!Subtarget.is64Bit())
2199 // This doesn't have SDLoc associated with it, but is not really the
2200 // same as a Register.
2201 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2202 getPointerTy(DAG.getDataLayout()));
2206 /// This returns the relocation base for the given PIC jumptable,
2207 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2208 const MCExpr *X86TargetLowering::
2209 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2210 MCContext &Ctx) const {
2211 // X86-64 uses RIP relative addressing based on the jump table label.
2212 if (Subtarget.isPICStyleRIPRel())
2213 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2215 // Otherwise, the reference is relative to the PIC base.
2216 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2219 std::pair<const TargetRegisterClass *, uint8_t>
2220 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2222 const TargetRegisterClass *RRC = nullptr;
2224 switch (VT.SimpleTy) {
2226 return TargetLowering::findRepresentativeClass(TRI, VT);
2227 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2228 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2231 RRC = &X86::VR64RegClass;
2233 case MVT::f32: case MVT::f64:
2234 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2235 case MVT::v4f32: case MVT::v2f64:
2236 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2237 case MVT::v8f32: case MVT::v4f64:
2238 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2239 case MVT::v16f32: case MVT::v8f64:
2240 RRC = &X86::VR128XRegClass;
2243 return std::make_pair(RRC, Cost);
2246 unsigned X86TargetLowering::getAddressSpace() const {
2247 if (Subtarget.is64Bit())
2248 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2252 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2253 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2254 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2257 static Constant* SegmentOffset(IRBuilder<> &IRB,
2258 unsigned Offset, unsigned AddressSpace) {
2259 return ConstantExpr::getIntToPtr(
2260 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2261 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
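// Address spaces 256, 257 and 258 are the x86 %gs-, %fs- and %ss-relative
// spaces, so dereferencing the returned constant is selected to a
// segment-relative access, e.g. "movq %fs:0x28, %rax" for the 64-bit,
// non-kernel stack-guard slot used below.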
2264 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2265 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2266 // tcbhead_t; use it instead of the usual global variable (see
2267 // sysdeps/{i386,x86_64}/nptl/tls.h)
2268 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2269 if (Subtarget.isTargetFuchsia()) {
2270 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2271 return SegmentOffset(IRB, 0x10, getAddressSpace());
2273 // %fs:0x28, unless we're using a Kernel code model, in which case
2274 // it's %gs:0x28. gs:0x14 on i386.
2275 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2276 return SegmentOffset(IRB, Offset, getAddressSpace());
2280 return TargetLowering::getIRStackGuard(IRB);
2283 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2284 // MSVC CRT provides functionality for stack protection.
2285 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2286 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2287 // MSVC CRT has a global variable holding security cookie.
2288 M.getOrInsertGlobal("__security_cookie",
2289 Type::getInt8PtrTy(M.getContext()));
2291 // MSVC CRT has a function to validate security cookie.
2292 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2293 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2294 Type::getInt8PtrTy(M.getContext()));
2295 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2296 F->setCallingConv(CallingConv::X86_FastCall);
2297 F->addAttribute(1, Attribute::AttrKind::InReg);
2301 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2302 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2304 TargetLowering::insertSSPDeclarations(M);
2307 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2308 // MSVC CRT has a global variable holding security cookie.
2309 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2310 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2311 return M.getGlobalVariable("__security_cookie");
2313 return TargetLowering::getSDagStackGuard(M);
2316 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2317 // MSVC CRT has a function to validate security cookie.
2318 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2319 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2320 return M.getFunction("__security_check_cookie");
2322 return TargetLowering::getSSPStackGuardCheck(M);
2325 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2326 if (Subtarget.getTargetTriple().isOSContiki())
2327 return getDefaultSafeStackPointerLocation(IRB, false);
2329 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2330 // definition of TLS_SLOT_SAFESTACK in
2331 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2332 if (Subtarget.isTargetAndroid()) {
2333 // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
2334 // 0x48. %gs:0x24 on i386.
2335 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2336 return SegmentOffset(IRB, Offset, getAddressSpace());
2339 // Fuchsia is similar.
2340 if (Subtarget.isTargetFuchsia()) {
2341 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2342 return SegmentOffset(IRB, 0x18, getAddressSpace());
2345 return TargetLowering::getSafeStackPointerLocation(IRB);
2348 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2349 unsigned DestAS) const {
2350 assert(SrcAS != DestAS && "Expected different address spaces!");
2352 return SrcAS < 256 && DestAS < 256;
2355 //===----------------------------------------------------------------------===//
2356 // Return Value Calling Convention Implementation
2357 //===----------------------------------------------------------------------===//
2359 bool X86TargetLowering::CanLowerReturn(
2360 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2361 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2362 SmallVector<CCValAssign, 16> RVLocs;
2363 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2364 return CCInfo.CheckReturn(Outs, RetCC_X86);
2367 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2368 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2372 /// Lowers mask values (v*i1) to the local register values
2373 /// \returns DAG node after lowering to register type
2374 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2375 const SDLoc &Dl, SelectionDAG &DAG) {
2376 EVT ValVT = ValArg.getValueType();
2378 if (ValVT == MVT::v1i1)
2379 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2380 DAG.getIntPtrConstant(0, Dl));
2382 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2383 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2384 // Two stage lowering might be required
2385 // bitcast: v8i1 -> i8 / v16i1 -> i16
2386 // anyextend: i8 -> i32 / i16 -> i32
2387 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2388 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2389 if (ValLoc == MVT::i32)
2390 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2394 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2395 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2396 // One stage lowering is required
2397 // bitcast: v32i1 -> i32 / v64i1 -> i64
2398 return DAG.getBitcast(ValLoc, ValArg);
2401 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2404 /// Breaks a v64i1 value into two registers and adds the new nodes to the DAG.
2405 static void Passv64i1ArgInRegs(
2406 const SDLoc &Dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg,
2407 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, CCValAssign &VA,
2408 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2409 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2410 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2411 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2412 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2413 "The value should reside in two registers");
2415 // Before splitting the value we cast it to i64
2416 Arg = DAG.getBitcast(MVT::i64, Arg);
2418 // Splitting the value into two i32 types
2420 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2421 DAG.getConstant(0, Dl, MVT::i32));
2422 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2423 DAG.getConstant(1, Dl, MVT::i32));
2425 // Attach the two i32 values to the corresponding registers.
2426 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2427 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2431 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2433 const SmallVectorImpl<ISD::OutputArg> &Outs,
2434 const SmallVectorImpl<SDValue> &OutVals,
2435 const SDLoc &dl, SelectionDAG &DAG) const {
2436 MachineFunction &MF = DAG.getMachineFunction();
2437 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2439 // In some cases we need to disable registers from the default CSR list.
2440 // For example, when they are used for argument passing.
2441 bool ShouldDisableCalleeSavedRegister =
2442 CallConv == CallingConv::X86_RegCall ||
2443 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2445 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2446 report_fatal_error("X86 interrupts may not return any value");
2448 SmallVector<CCValAssign, 16> RVLocs;
2449 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2450 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2453 SmallVector<SDValue, 6> RetOps;
2454 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2455 // Operand #1 = Bytes To Pop
2456 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2459 // Copy the result values into the output registers.
2460 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2462 CCValAssign &VA = RVLocs[I];
2463 assert(VA.isRegLoc() && "Can only return in registers!");
2465 // Add the register to the CalleeSaveDisableRegs list.
2466 if (ShouldDisableCalleeSavedRegister)
2467 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2469 SDValue ValToCopy = OutVals[OutsIndex];
2470 EVT ValVT = ValToCopy.getValueType();
2472 // Promote values to the appropriate types.
2473 if (VA.getLocInfo() == CCValAssign::SExt)
2474 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2475 else if (VA.getLocInfo() == CCValAssign::ZExt)
2476 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2477 else if (VA.getLocInfo() == CCValAssign::AExt) {
2478 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2479 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2481 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2483 else if (VA.getLocInfo() == CCValAssign::BCvt)
2484 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2486 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2487 "Unexpected FP-extend for return value.");
2489 // If this is x86-64, and we disabled SSE, we can't return FP values,
2490 // or SSE or MMX vectors.
2491 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2492 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2493 (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
2494 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2495 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2496 } else if (ValVT == MVT::f64 &&
2497 (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
2498 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2499 // llvm-gcc has never done it right and no one has noticed, so this
2500 // should be OK for now.
2501 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2502 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2505 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2506 // the RET instruction and handled by the FP Stackifier.
2507 if (VA.getLocReg() == X86::FP0 ||
2508 VA.getLocReg() == X86::FP1) {
2509 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2510 // change the value to the FP stack register class.
2511 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2512 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2513 RetOps.push_back(ValToCopy);
2514 // Don't emit a copytoreg.
2518 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2519 // which is returned in RAX / RDX.
2520 if (Subtarget.is64Bit()) {
2521 if (ValVT == MVT::x86mmx) {
2522 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2523 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2524 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2526 // If we don't have SSE2 available, convert to v4f32 so the generated
2527 // register is legal.
2528 if (!Subtarget.hasSSE2())
2529 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2534 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2536 if (VA.needsCustom()) {
2537 assert(VA.getValVT() == MVT::v64i1 &&
2538 "Currently the only custom case is when we split v64i1 to 2 regs");
2540 Passv64i1ArgInRegs(dl, DAG, Chain, ValToCopy, RegsToPass, VA, RVLocs[++I],
2543 assert(2 == RegsToPass.size() &&
2544 "Expecting two registers after Pass64BitArgInRegs");
2546 // Add the second register to the CalleeSaveDisableRegs list.
2547 if (ShouldDisableCalleeSavedRegister)
2548 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2550 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2553 // Add nodes to the DAG and add the values into the RetOps list
2554 for (auto &Reg : RegsToPass) {
2555 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2556 Flag = Chain.getValue(1);
2557 RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2561 // Swift calling convention does not require that we copy the sret argument
2562 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2564 // All x86 ABIs require that for returning structs by value we copy
2565 // the sret argument into %rax/%eax (depending on ABI) for the return.
2566 // We saved the argument into a virtual register in the entry block,
2567 // so now we copy the value out and into %rax/%eax.
2569 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2570 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2571 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2572 // either case FuncInfo->setSRetReturnReg() will have been called.
2573 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2574 // When we have both sret and another return value, we should use the
2575 // original Chain stored in RetOps[0], instead of the current Chain updated
2576 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
2578 // For the case of sret and another return value, we have
2579 // Chain_0 at the function entry
2580 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2581 // If we use Chain_1 in getCopyFromReg, we will have
2582 // Val = getCopyFromReg(Chain_1)
2583 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2585 // getCopyToReg(Chain_0) will be glued together with
2586 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2587 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2588 // Data dependency from Unit B to Unit A due to usage of Val in
2589 // getCopyToReg(Chain_1, Val)
2590 // Chain dependency from Unit A to Unit B
2592 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2593 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2594 getPointerTy(MF.getDataLayout()));
2597 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2598 X86::RAX : X86::EAX;
2599 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2600 Flag = Chain.getValue(1);
2602 // RAX/EAX now acts like a return value.
2604 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2606 // Add the returned register to the CalleeSaveDisableRegs list.
2607 if (ShouldDisableCalleeSavedRegister)
2608 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2611 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2612 const MCPhysReg *I =
2613 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2616 if (X86::GR64RegClass.contains(*I))
2617 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2619 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2623 RetOps[0] = Chain; // Update chain.
2625 // Add the flag if we have it.
2627 RetOps.push_back(Flag);
2629 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2630 if (CallConv == CallingConv::X86_INTR)
2631 opcode = X86ISD::IRET;
2632 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2635 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2636 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2639 SDValue TCChain = Chain;
2640 SDNode *Copy = *N->use_begin();
2641 if (Copy->getOpcode() == ISD::CopyToReg) {
2642 // If the copy has a glue operand, we conservatively assume it isn't safe to
2643 // perform a tail call.
2644 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2646 TCChain = Copy->getOperand(0);
2647 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2650 bool HasRet = false;
2651 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2653 if (UI->getOpcode() != X86ISD::RET_FLAG)
2655 // If we are returning more than one value, we can definitely
2656 // not make a tail call; see PR19530.
2657 if (UI->getNumOperands() > 4)
2659 if (UI->getNumOperands() == 4 &&
2660 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2672 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2673 ISD::NodeType ExtendKind) const {
2674 MVT ReturnMVT = MVT::i32;
2676 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2677 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2678 // The ABI does not require i1, i8 or i16 to be extended.
2680 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2681 // always extending i8/i16 return values, so keep doing that for now.
2683 ReturnMVT = MVT::i8;
2686 EVT MinVT = getRegisterType(Context, ReturnMVT);
2687 return VT.bitsLT(MinVT) ? MinVT : VT;
2690 /// Reads two 32-bit registers and creates a 64-bit mask value.
2691 /// \param VA The current 32-bit value that needs to be assigned.
2692 /// \param NextVA The next 32-bit value that needs to be assigned.
2693 /// \param Root The parent DAG node.
2694 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node for
2695 /// glue purposes. If the DAG is already using a
2696 /// physical register instead of a virtual one, we should
2697 /// glue our new SDValue to the InFlag SDValue.
2698 /// \return A new 64-bit SDValue.
2699 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2700 SDValue &Root, SelectionDAG &DAG,
2701 const SDLoc &Dl, const X86Subtarget &Subtarget,
2702 SDValue *InFlag = nullptr) {
2703 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2704 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2705 assert(VA.getValVT() == MVT::v64i1 &&
2706 "Expecting first location of 64 bit width type");
2707 assert(NextVA.getValVT() == VA.getValVT() &&
2708 "The locations should have the same type");
2709 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2710 "The values should reside in two registers");
2713 SDValue ArgValueLo, ArgValueHi;
2715 MachineFunction &MF = DAG.getMachineFunction();
2716 const TargetRegisterClass *RC = &X86::GR32RegClass;
2718 // Read a 32 bit value from the registers.
2719 if (nullptr == InFlag) {
2720 // When no physical register is present,
2721 // create an intermediate virtual register.
2722 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2723 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2724 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2725 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2727 // When a physical register is available, read the value from it and glue
2728 // the reads together.
2730 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2731 *InFlag = ArgValueLo.getValue(2);
2733 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2734 *InFlag = ArgValueHi.getValue(2);
2737 // Convert the i32 type into v32i1 type.
2738 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2740 // Convert the i32 type into v32i1 type.
2741 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2743 // Concatenate the two values together.
2744 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2747 /// The function will lower a register of various sizes (8/16/32/64)
2748 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1)
2749 /// \returns a DAG node that contains the operand after lowering to mask type.
2750 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2751 const EVT &ValLoc, const SDLoc &Dl,
2752 SelectionDAG &DAG) {
2753 SDValue ValReturned = ValArg;
2755 if (ValVT == MVT::v1i1)
2756 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2758 if (ValVT == MVT::v64i1) {
2759 // On a 32-bit target this case is handled by getv64i1Argument.
2760 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2761 // On a 64-bit target there is no need to truncate the value, only bitcast it.
2764 switch (ValVT.getSimpleVT().SimpleTy) {
2775 llvm_unreachable("Expecting a vector of i1 types");
2778 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2780 return DAG.getBitcast(ValVT, ValReturned);
2783 /// Lower the result values of a call into the
2784 /// appropriate copies out of appropriate physical registers.
2786 SDValue X86TargetLowering::LowerCallResult(
2787 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2788 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2789 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2790 uint32_t *RegMask) const {
2792 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2793 // Assign locations to each value returned by this call.
2794 SmallVector<CCValAssign, 16> RVLocs;
2795 bool Is64Bit = Subtarget.is64Bit();
2796 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2798 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2800 // Copy all of the result registers out of their specified physreg.
2801 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
2803 CCValAssign &VA = RVLocs[I];
2804 EVT CopyVT = VA.getLocVT();
2806 // In some calling conventions we need to remove the used registers
2807 // from the register mask.
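// (The register mask stores one bit per register, packed 32 bits per uint32_t
// word, so clearing bit (Reg % 32) of word (Reg / 32) marks Reg as not preserved.)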
2809 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
2810 SubRegs.isValid(); ++SubRegs)
2811 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
2814 // If this is x86-64, and we disabled SSE, we can't return FP values
2815 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
2816 ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
2817 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2818 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2821 // If we prefer to use the value in xmm registers, copy it out as f80 and
2822 // use a truncate to move it from fp stack reg to xmm reg.
2823 bool RoundAfterCopy = false;
2824 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2825 isScalarFPTypeInSSEReg(VA.getValVT())) {
2826 if (!Subtarget.hasX87())
2827 report_fatal_error("X87 register return with X87 disabled");
2829 RoundAfterCopy = (CopyVT != VA.getLocVT());
2833 if (VA.needsCustom()) {
2834 assert(VA.getValVT() == MVT::v64i1 &&
2835 "Currently the only custom case is when we split v64i1 to 2 regs");
2837 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
2839 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
2841 Val = Chain.getValue(0);
2842 InFlag = Chain.getValue(2);
2846 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2847 // This truncation won't change the value.
2848 DAG.getIntPtrConstant(1, dl));
2850 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
2851 if (VA.getValVT().isVector() &&
2852 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
2853 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
2854 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
2855 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
2857 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
2860 InVals.push_back(Val);
2866 //===----------------------------------------------------------------------===//
2867 // C & StdCall & Fast Calling Convention implementation
2868 //===----------------------------------------------------------------------===//
2869 // The StdCall calling convention is the standard for many Windows API
2870 // routines. It differs from the C calling convention only slightly: the
2871 // callee cleans up the stack rather than the caller, and symbols are
2872 // decorated in a particular way. It doesn't support any vector arguments.
2873 // For info on the fast calling convention see the Fast Calling Convention
2874 // (tail call) implementation, LowerX86_32FastCCCallTo.
2876 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2878 enum StructReturnType {
2883 static StructReturnType
2884 callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
2886 return NotStructReturn;
2888 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2889 if (!Flags.isSRet())
2890 return NotStructReturn;
2891 if (Flags.isInReg() || IsMCU)
2892 return RegStructReturn;
2893 return StackStructReturn;
2896 /// Determines whether a function uses struct return semantics.
2897 static StructReturnType
2898 argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
2900 return NotStructReturn;
2902 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2903 if (!Flags.isSRet())
2904 return NotStructReturn;
2905 if (Flags.isInReg() || IsMCU)
2906 return RegStructReturn;
2907 return StackStructReturn;
2910 /// Make a copy of an aggregate at the address specified by "Src" to the
2911 /// address "Dst", with size and alignment information specified by the
2912 /// parameter's byval attribute. The copy will be passed as a byval function parameter.
2913 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
2914 SDValue Chain, ISD::ArgFlagsTy Flags,
2915 SelectionDAG &DAG, const SDLoc &dl) {
2916 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
2918 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2919 /*isVolatile*/false, /*AlwaysInline=*/true,
2920 /*isTailCall*/false,
2921 MachinePointerInfo(), MachinePointerInfo());
2924 /// Return true if the calling convention is one that we can guarantee TCO for.
2925 static bool canGuaranteeTCO(CallingConv::ID CC) {
2926 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2927 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
2928 CC == CallingConv::HHVM);
2931 /// Return true if we might ever do TCO for calls with this calling convention.
2932 static bool mayTailCallThisCC(CallingConv::ID CC) {
2934 // C calling conventions:
2935 case CallingConv::C:
2936 case CallingConv::Win64:
2937 case CallingConv::X86_64_SysV:
2938 // Callee pop conventions:
2939 case CallingConv::X86_ThisCall:
2940 case CallingConv::X86_StdCall:
2941 case CallingConv::X86_VectorCall:
2942 case CallingConv::X86_FastCall:
2944 case CallingConv::Swift:
2947 return canGuaranteeTCO(CC);
2951 /// Return true if the function is being made into a tailcall target by
2952 /// changing its ABI.
2953 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
2954 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
2957 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2959 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2960 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2963 ImmutableCallSite CS(CI);
2964 CallingConv::ID CalleeCC = CS.getCallingConv();
2965 if (!mayTailCallThisCC(CalleeCC))
2972 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
2973 const SmallVectorImpl<ISD::InputArg> &Ins,
2974 const SDLoc &dl, SelectionDAG &DAG,
2975 const CCValAssign &VA,
2976 MachineFrameInfo &MFI, unsigned i) const {
2977 // Create the nodes corresponding to a load from this parameter slot.
2978 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2979 bool AlwaysUseMutable = shouldGuaranteeTCO(
2980 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2981 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2983 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2985 // If the value is passed by pointer, we have the address passed instead of the
2986 // value itself. No need to extend if the mask value and location share the same size.
2988 bool ExtendedInMem =
2989 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
2990 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
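// (Illustrative) e.g. a v8i1 mask promoted into a 32-bit memory location differs
// in size from its location, so the value loaded below must be converted back to
// the original value type.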
2992 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
2993 ValVT = VA.getLocVT();
2995 ValVT = VA.getValVT();
2997 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2998 // changed with more analysis.
2999 // In case of tail call optimization, mark all arguments mutable, since they
3000 // could be overwritten by the lowering of arguments in case of a tail call.
3001 if (Flags.isByVal()) {
3002 unsigned Bytes = Flags.getByValSize();
3003 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3005 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3006 // can be improved with deeper analysis.
3007 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3008 /*isAliased=*/true);
3009 return DAG.getFrameIndex(FI, PtrVT);
3012 // This is an argument in memory. We might be able to perform copy elision.
3013 // If the argument is passed directly in memory without any extension, then we
3014 // can perform copy elision. Large vector types, for example, may be passed
3015 // indirectly by pointer.
3016 if (Flags.isCopyElisionCandidate() &&
3017 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3018 EVT ArgVT = Ins[i].ArgVT;
3020 if (Ins[i].PartOffset == 0) {
3021 // If this is a one-part value or the first part of a multi-part value,
3022 // create a stack object for the entire argument value type and return a
3023 // load from our portion of it. This assumes that if the first part of an
3024 // argument is in memory, the rest will also be in memory.
3025 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3026 /*IsImmutable=*/false);
3027 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3029 ValVT, dl, Chain, PartAddr,
3030 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3032 // This is not the first piece of an argument in memory. See if there is
3033 // already a fixed stack object including this offset. If so, assume it
3034 // was created by the PartOffset == 0 branch above and create a load from
3035 // the appropriate offset into it.
3036 int64_t PartBegin = VA.getLocMemOffset();
3037 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3038 int FI = MFI.getObjectIndexBegin();
3039 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3040 int64_t ObjBegin = MFI.getObjectOffset(FI);
3041 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3042 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3045 if (MFI.isFixedObjectIndex(FI)) {
3047 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3048 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3050 ValVT, dl, Chain, Addr,
3051 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3052 Ins[i].PartOffset));
3057 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3058 VA.getLocMemOffset(), isImmutable);
3060 // Set SExt or ZExt flag.
3061 if (VA.getLocInfo() == CCValAssign::ZExt) {
3062 MFI.setObjectZExt(FI, true);
3063 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3064 MFI.setObjectSExt(FI, true);
3067 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3068 SDValue Val = DAG.getLoad(
3069 ValVT, dl, Chain, FIN,
3070 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3071 return ExtendedInMem
3072 ? (VA.getValVT().isVector()
3073 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3074 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3078 // FIXME: Get this from tablegen.
3079 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3080 const X86Subtarget &Subtarget) {
3081 assert(Subtarget.is64Bit());
3083 if (Subtarget.isCallingConvWin64(CallConv)) {
3084 static const MCPhysReg GPR64ArgRegsWin64[] = {
3085 X86::RCX, X86::RDX, X86::R8, X86::R9
3087 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3090 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3091 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3093 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3096 // FIXME: Get this from tablegen.
3097 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3098 CallingConv::ID CallConv,
3099 const X86Subtarget &Subtarget) {
3100 assert(Subtarget.is64Bit());
3101 if (Subtarget.isCallingConvWin64(CallConv)) {
3102 // The XMM registers which might contain var arg parameters are shadowed
3103 // in their paired GPRs, so we only need to save the GPRs to their home slots.
3105 // TODO: __vectorcall will change this.
3109 const Function &F = MF.getFunction();
3110 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3111 bool isSoftFloat = Subtarget.useSoftFloat();
3112 assert(!(isSoftFloat && NoImplicitFloatOps) &&
3113 "SSE register cannot be used when SSE is disabled!");
3114 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3115 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
3119 static const MCPhysReg XMMArgRegs64Bit[] = {
3120 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3121 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3123 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3127 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3128 return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
3129 [](const CCValAssign &A, const CCValAssign &B) -> bool {
3130 return A.getValNo() < B.getValNo();
3135 SDValue X86TargetLowering::LowerFormalArguments(
3136 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3137 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3138 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3139 MachineFunction &MF = DAG.getMachineFunction();
3140 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3141 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3143 const Function &F = MF.getFunction();
3144 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3145 F.getName() == "main")
3146 FuncInfo->setForceFramePointer(true);
3148 MachineFrameInfo &MFI = MF.getFrameInfo();
3149 bool Is64Bit = Subtarget.is64Bit();
3150 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3153 !(isVarArg && canGuaranteeTCO(CallConv)) &&
3154 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3156 // Assign locations to all of the incoming arguments.
3157 SmallVector<CCValAssign, 16> ArgLocs;
3158 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3160 // Allocate shadow area for Win64.
3162 CCInfo.AllocateStack(32, 8);
3164 CCInfo.AnalyzeArguments(Ins, CC_X86);
3166 // In vectorcall calling convention a second pass is required for the HVA
3168 if (CallingConv::X86_VectorCall == CallConv) {
3169 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3172 // The next loop assumes that the locations are in the same order as the incoming arguments.
3174 assert(isSortedByValueNo(ArgLocs) &&
3175 "Argument Location list must be sorted before lowering");
3178 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3180 assert(InsIndex < Ins.size() && "Invalid Ins index");
3181 CCValAssign &VA = ArgLocs[I];
3183 if (VA.isRegLoc()) {
3184 EVT RegVT = VA.getLocVT();
3185 if (VA.needsCustom()) {
3187 VA.getValVT() == MVT::v64i1 &&
3188 "Currently the only custom case is when we split v64i1 to 2 regs");
3190 // In the RegCall calling convention, v64i1 values that are
3191 // compiled for a 32-bit architecture are split up into two registers.
3193 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3195 const TargetRegisterClass *RC;
3196 if (RegVT == MVT::i8)
3197 RC = &X86::GR8RegClass;
3198 else if (RegVT == MVT::i16)
3199 RC = &X86::GR16RegClass;
3200 else if (RegVT == MVT::i32)
3201 RC = &X86::GR32RegClass;
3202 else if (Is64Bit && RegVT == MVT::i64)
3203 RC = &X86::GR64RegClass;
3204 else if (RegVT == MVT::f32)
3205 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3206 else if (RegVT == MVT::f64)
3207 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3208 else if (RegVT == MVT::f80)
3209 RC = &X86::RFP80RegClass;
3210 else if (RegVT == MVT::f128)
3211 RC = &X86::VR128RegClass;
3212 else if (RegVT.is512BitVector())
3213 RC = &X86::VR512RegClass;
3214 else if (RegVT.is256BitVector())
3215 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3216 else if (RegVT.is128BitVector())
3217 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3218 else if (RegVT == MVT::x86mmx)
3219 RC = &X86::VR64RegClass;
3220 else if (RegVT == MVT::v1i1)
3221 RC = &X86::VK1RegClass;
3222 else if (RegVT == MVT::v8i1)
3223 RC = &X86::VK8RegClass;
3224 else if (RegVT == MVT::v16i1)
3225 RC = &X86::VK16RegClass;
3226 else if (RegVT == MVT::v32i1)
3227 RC = &X86::VK32RegClass;
3228 else if (RegVT == MVT::v64i1)
3229 RC = &X86::VK64RegClass;
3231 llvm_unreachable("Unknown argument type!");
3233 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3234 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3237 // If this is an 8 or 16-bit value, it is really passed promoted to 32
3238 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
3240 if (VA.getLocInfo() == CCValAssign::SExt)
3241 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3242 DAG.getValueType(VA.getValVT()));
3243 else if (VA.getLocInfo() == CCValAssign::ZExt)
3244 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3245 DAG.getValueType(VA.getValVT()));
3246 else if (VA.getLocInfo() == CCValAssign::BCvt)
3247 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3249 if (VA.isExtInLoc()) {
3250 // Handle MMX values passed in XMM regs.
3251 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3252 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3253 else if (VA.getValVT().isVector() &&
3254 VA.getValVT().getScalarType() == MVT::i1 &&
3255 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3256 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3257 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3258 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3260 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3263 assert(VA.isMemLoc());
3265 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3268 // If value is passed via pointer - do a load.
3269 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3271 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3273 InVals.push_back(ArgValue);
3276 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3277 // Swift calling convention does not require we copy the sret argument
3278 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3279 if (CallConv == CallingConv::Swift)
3282 // All x86 ABIs require that for returning structs by value we copy the
3283 // sret argument into %rax/%eax (depending on ABI) for the return. Save
3284 // the argument into a virtual register so that we can access it from the
3286 if (Ins[I].Flags.isSRet()) {
3287 unsigned Reg = FuncInfo->getSRetReturnReg();
3289 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3290 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3291 FuncInfo->setSRetReturnReg(Reg);
3293 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3294 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3299 unsigned StackSize = CCInfo.getNextStackOffset();
3300 // Align stack specially for tail calls.
3301 if (shouldGuaranteeTCO(CallConv,
3302 MF.getTarget().Options.GuaranteedTailCallOpt))
3303 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3305 // If the function takes a variable number of arguments, make a frame index for
3306 // the start of the first vararg value... for expansion of llvm.va_start. We
3307 // can skip this if there are no va_start calls.
3308 if (MFI.hasVAStart() &&
3309 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3310 CallConv != CallingConv::X86_ThisCall))) {
3311 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3314 // Figure out if XMM registers are in use.
3315 assert(!(Subtarget.useSoftFloat() &&
3316 F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3317 "SSE register cannot be used when SSE is disabled!");
3319 // 64-bit calling conventions support varargs and register parameters, so we
3320 // have to do extra work to spill them in the prologue.
3321 if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3322 // Find the first unallocated argument registers.
3323 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3324 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3325 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3326 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3327 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3328 "SSE register cannot be used when SSE is disabled!");
3330 // Gather all the live in physical registers.
3331 SmallVector<SDValue, 6> LiveGPRs;
3332 SmallVector<SDValue, 8> LiveXMMRegs;
3334 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3335 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3337 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3339 if (!ArgXMMs.empty()) {
3340 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3341 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3342 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3343 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3344 LiveXMMRegs.push_back(
3345 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3350 // Get to the caller-allocated home save location. Add 8 to account
3351 // for the return address.
3352 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3353 FuncInfo->setRegSaveFrameIndex(
3354 MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3355 // Fixup to set vararg frame on shadow area (4 x i64).
3357 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3359 // For X86-64, if there are vararg parameters that are passed via
3360 // registers, then we must store them to their spots on the stack so
3361 // they may be loaded by dereferencing the result of va_next.
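// (Illustrative) The register save area created below holds all six GPR argument
// registers (6 x 8 bytes) followed by all eight XMM argument registers
// (8 x 16 bytes); the va_list gp_offset and fp_offset fields index into it.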
3362 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3363 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3364 FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3365 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
3368 // Store the integer parameter registers.
3369 SmallVector<SDValue, 8> MemOps;
3370 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3371 getPointerTy(DAG.getDataLayout()));
3372 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3373 for (SDValue Val : LiveGPRs) {
3374 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3375 RSFIN, DAG.getIntPtrConstant(Offset, dl));
3377 DAG.getStore(Val.getValue(1), dl, Val, FIN,
3378 MachinePointerInfo::getFixedStack(
3379 DAG.getMachineFunction(),
3380 FuncInfo->getRegSaveFrameIndex(), Offset));
3381 MemOps.push_back(Store);
3385 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3386 // Now store the XMM (fp + vector) parameter registers.
3387 SmallVector<SDValue, 12> SaveXMMOps;
3388 SaveXMMOps.push_back(Chain);
3389 SaveXMMOps.push_back(ALVal);
3390 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3391 FuncInfo->getRegSaveFrameIndex(), dl));
3392 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3393 FuncInfo->getVarArgsFPOffset(), dl));
3394 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3396 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3397 MVT::Other, SaveXMMOps));
3400 if (!MemOps.empty())
3401 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3404 if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3405 // Find the largest legal vector type.
3406 MVT VecVT = MVT::Other;
3407 // FIXME: Only some x86_32 calling conventions support AVX512.
3408 if (Subtarget.hasAVX512() &&
3409 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3410 CallConv == CallingConv::Intel_OCL_BI)))
3411 VecVT = MVT::v16f32;
3412 else if (Subtarget.hasAVX())
3414 else if (Subtarget.hasSSE2())
3417 // We forward some GPRs and some vector types.
3418 SmallVector<MVT, 2> RegParmTypes;
3419 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3420 RegParmTypes.push_back(IntVT);
3421 if (VecVT != MVT::Other)
3422 RegParmTypes.push_back(VecVT);
3424 // Compute the set of forwarded registers. The rest are scratch.
3425 SmallVectorImpl<ForwardedRegister> &Forwards =
3426 FuncInfo->getForwardedMustTailRegParms();
3427 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3429 // Conservatively forward AL on x86_64, since it might be used for varargs.
3430 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
3431 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3432 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3435 // Copy all forwards from physical to virtual registers.
3436 for (ForwardedRegister &FR : Forwards) {
3437 // FIXME: Can we use a less constrained schedule?
3438 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
3439 FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
3440 Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
3444 // Some CCs need callee pop.
3445 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3446 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3447 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3448 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3449 // X86 interrupts must pop the error code (and the alignment padding) if present.
3451 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3453 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3454 // If this is an sret function, the return should pop the hidden pointer.
3455 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3456 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3457 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3458 FuncInfo->setBytesToPopOnReturn(4);
3462 // RegSaveFrameIndex is X86-64 only.
3463 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3464 if (CallConv == CallingConv::X86_FastCall ||
3465 CallConv == CallingConv::X86_ThisCall)
3466 // fastcc functions can't have varargs.
3467 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3470 FuncInfo->setArgumentStackSize(StackSize);
3472 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3473 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3474 if (Personality == EHPersonality::CoreCLR) {
3476 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3477 // that we'd prefer this slot be allocated towards the bottom of the frame
3478 // (i.e. near the stack pointer after allocating the frame). Every
3479 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3480 // offset from the bottom of this and each funclet's frame must be the
3481 // same, so the size of funclets' (mostly empty) frames is dictated by
3482 // how far this slot is from the bottom (since they allocate just enough
3483 // space to accommodate holding this slot at the correct offset).
3484 int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3485 EHInfo->PSPSymFrameIdx = PSPSymFI;
3489 if (CallConv == CallingConv::X86_RegCall ||
3490 F.hasFnAttribute("no_caller_saved_registers")) {
3491 MachineRegisterInfo &MRI = MF.getRegInfo();
3492 for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
3493 MRI.disableCalleeSavedRegister(Pair.first);
3499 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3500 SDValue Arg, const SDLoc &dl,
3502 const CCValAssign &VA,
3503 ISD::ArgFlagsTy Flags) const {
3504 unsigned LocMemOffset = VA.getLocMemOffset();
3505 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3506 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3508 if (Flags.isByVal())
3509 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3511 return DAG.getStore(
3512 Chain, dl, Arg, PtrOff,
3513 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3516 /// Emit a load of return address if tail call
3517 /// optimization is performed and it is required.
3518 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3519 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3520 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3521 // Adjust the Return address stack slot.
3522 EVT VT = getPointerTy(DAG.getDataLayout());
3523 OutRetAddr = getReturnAddressFrameIndex(DAG);
3525 // Load the "old" Return address.
3526 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3527 return SDValue(OutRetAddr.getNode(), 1);
3530 /// Emit a store of the return address if tail call
3531 /// optimization is performed and it is required (FPDiff!=0).
3532 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3533 SDValue Chain, SDValue RetAddrFrIdx,
3534 EVT PtrVT, unsigned SlotSize,
3535 int FPDiff, const SDLoc &dl) {
3536 // Store the return address to the appropriate stack slot.
3537 if (!FPDiff) return Chain;
3538 // Calculate the new stack slot for the return address.
3539 int NewReturnAddrFI =
3540 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3542 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3543 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3544 MachinePointerInfo::getFixedStack(
3545 DAG.getMachineFunction(), NewReturnAddrFI));
3549 /// Returns a vector_shuffle mask for a movs{s|d} or movd
3550 /// operation of the specified width.
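/// For example (illustrative), for a 4-element type the mask is <4, 1, 2, 3>:
/// element 0 is taken from V2 and the remaining elements come from V1.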
3551 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3553 unsigned NumElems = VT.getVectorNumElements();
3554 SmallVector<int, 8> Mask;
3555 Mask.push_back(NumElems);
3556 for (unsigned i = 1; i != NumElems; ++i)
3558 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3562 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3563 SmallVectorImpl<SDValue> &InVals) const {
3564 SelectionDAG &DAG = CLI.DAG;
3566 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3567 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3568 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3569 SDValue Chain = CLI.Chain;
3570 SDValue Callee = CLI.Callee;
3571 CallingConv::ID CallConv = CLI.CallConv;
3572 bool &isTailCall = CLI.IsTailCall;
3573 bool isVarArg = CLI.IsVarArg;
3575 MachineFunction &MF = DAG.getMachineFunction();
3576 bool Is64Bit = Subtarget.is64Bit();
3577 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3578 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3579 bool IsSibcall = false;
3580 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3581 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
3582 const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
3583 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3584 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3585 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3586 const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
3588 (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3589 const Module *M = MF.getMMI().getModule();
3590 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3592 MachineFunction::CallSiteInfo CSInfo;
3594 if (CallConv == CallingConv::X86_INTR)
3595 report_fatal_error("X86 interrupts may not be called directly");
3597 if (Attr.getValueAsString() == "true")
3600 if (Subtarget.isPICStyleGOT() &&
3601 !MF.getTarget().Options.GuaranteedTailCallOpt) {
3602 // If we are using a GOT, disable tail calls to external symbols with
3603 // default visibility. Tail calling such a symbol requires using a GOT
3604 // relocation, which forces early binding of the symbol. This breaks code
3605 // that requires lazy function symbol resolution. Using musttail or
3606 // GuaranteedTailCallOpt will override this.
3607 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3608 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3609 G->getGlobal()->hasDefaultVisibility()))
3613 bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
3615 // Force this to be a tail call. The verifier rules are enough to ensure
3616 // that we can lower this successfully without moving the return address around.
3619 } else if (isTailCall) {
3620 // Check if it's really possible to do a tail call.
3621 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3622 isVarArg, SR != NotStructReturn,
3623 MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3624 Outs, OutVals, Ins, DAG);
3626 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
3628 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
3635 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3636 "Var args not supported with calling convention fastcc, ghc or hipe");
3638 // Analyze operands of the call, assigning locations to each operand.
3639 SmallVector<CCValAssign, 16> ArgLocs;
3640 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3642 // Allocate shadow area for Win64.
3644 CCInfo.AllocateStack(32, 8);
3646 CCInfo.AnalyzeArguments(Outs, CC_X86);
3648 // In vectorcall calling convention a second pass is required for the HVA
3650 if (CallingConv::X86_VectorCall == CallConv) {
3651 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3654 // Get a count of how many bytes are to be pushed on the stack.
3655 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3657 // This is a sibcall. The memory operands are already in place in the caller's
3658 // incoming argument area, i.e. in the stack frame of the caller's own caller.
3660 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
3661 canGuaranteeTCO(CallConv))
3662 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3665 if (isTailCall && !IsSibcall && !IsMustTail) {
3666 // Lower arguments at fp - stackoffset + fpdiff.
3667 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3669 FPDiff = NumBytesCallerPushed - NumBytes;
3671 // Set the delta of movement of the return address stack slot,
3672 // but only if this delta is greater than the previous delta.
3673 if (FPDiff < X86Info->getTCReturnAddrDelta())
3674 X86Info->setTCReturnAddrDelta(FPDiff);
3677 unsigned NumBytesToPush = NumBytes;
3678 unsigned NumBytesToPop = NumBytes;
3680 // If we have an inalloca argument, all stack space has already been allocated
3681 // for us and is right at the top of the stack. We don't support multiple
3682 // arguments passed in memory when using inalloca.
3683 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3685 if (!ArgLocs.back().isMemLoc())
3686 report_fatal_error("cannot use inalloca attribute on a register "
3688 if (ArgLocs.back().getLocMemOffset() != 0)
3689 report_fatal_error("any parameter with the inalloca attribute must be "
3690 "the only memory argument");
3694 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3695 NumBytes - NumBytesToPush, dl);
3697 SDValue RetAddrFrIdx;
3698 // Load return address for tail calls.
3699 if (isTailCall && FPDiff)
3700 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3701 Is64Bit, FPDiff, dl);
3703 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3704 SmallVector<SDValue, 8> MemOpChains;
3707 // The next loop assumes that the locations are in the same order as the outgoing arguments.
3709 assert(isSortedByValueNo(ArgLocs) &&
3710 "Argument Location list must be sorted before lowering");
3712 // Walk the register/memloc assignments, inserting copies/loads. In the case
3713 // of tail call optimization, arguments are handled later.
3714 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3715 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3717 assert(OutIndex < Outs.size() && "Invalid Out index");
3718 // Skip inalloca arguments, they have already been written.
3719 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3720 if (Flags.isInAlloca())
3723 CCValAssign &VA = ArgLocs[I];
3724 EVT RegVT = VA.getLocVT();
3725 SDValue Arg = OutVals[OutIndex];
3726 bool isByVal = Flags.isByVal();
3728 // Promote the value if needed.
3729 switch (VA.getLocInfo()) {
3730 default: llvm_unreachable("Unknown loc info!");
3731 case CCValAssign::Full: break;
3732 case CCValAssign::SExt:
3733 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3735 case CCValAssign::ZExt:
3736 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3738 case CCValAssign::AExt:
3739 if (Arg.getValueType().isVector() &&
3740 Arg.getValueType().getVectorElementType() == MVT::i1)
3741 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3742 else if (RegVT.is128BitVector()) {
3743 // Special case: passing MMX values in XMM registers.
3744 Arg = DAG.getBitcast(MVT::i64, Arg);
3745 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3746 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3748 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3750 case CCValAssign::BCvt:
3751 Arg = DAG.getBitcast(RegVT, Arg);
3753 case CCValAssign::Indirect: {
3755 // Memcpy the argument to a temporary stack slot to prevent
3756 // the caller from seeing any modifications the callee may make
3757 // as guaranteed by the `byval` attribute.
3758 int FrameIdx = MF.getFrameInfo().CreateStackObject(
3759 Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
3762 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
3764 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
3765 // From now on treat this as a regular pointer
3769 // Store the argument.
3770 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3771 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3772 Chain = DAG.getStore(
3773 Chain, dl, Arg, SpillSlot,
3774 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3781 if (VA.needsCustom()) {
3782 assert(VA.getValVT() == MVT::v64i1 &&
3783 "Currently the only custom case is when we split v64i1 to 2 regs");
3784 // Split v64i1 value into two registers
3785 Passv64i1ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++I],
3787 } else if (VA.isRegLoc()) {
3788 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3789 const TargetOptions &Options = DAG.getTarget().Options;
3790 if (Options.EnableDebugEntryValues)
3791 CSInfo.emplace_back(VA.getLocReg(), I);
3792 if (isVarArg && IsWin64) {
3793 // The Win64 ABI requires an argument passed in an XMM register to be copied
3794 // to the corresponding shadow GPR if the callee is a varargs function.
3795 unsigned ShadowReg = 0;
3796 switch (VA.getLocReg()) {
3797 case X86::XMM0: ShadowReg = X86::RCX; break;
3798 case X86::XMM1: ShadowReg = X86::RDX; break;
3799 case X86::XMM2: ShadowReg = X86::R8; break;
3800 case X86::XMM3: ShadowReg = X86::R9; break;
3803 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
3805 } else if (!IsSibcall && (!isTailCall || isByVal)) {
3806 assert(VA.isMemLoc());
3807 if (!StackPtr.getNode())
3808 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3809 getPointerTy(DAG.getDataLayout()));
3810 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
3811 dl, DAG, VA, Flags));
3815 if (!MemOpChains.empty())
3816 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
3818 if (Subtarget.isPICStyleGOT()) {
3819 // ELF / PIC requires GOT in the EBX register before function calls via PLT
3822 RegsToPass.push_back(std::make_pair(
3823 unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
3824 getPointerTy(DAG.getDataLayout()))));
3826 // If we are tail calling and generating PIC/GOT style code load the
3827 // address of the callee into ECX. The value in ecx is used as target of
3828 // the tail jump. This is done to circumvent the ebx/callee-saved problem
3829 // for tail calls on PIC/GOT architectures. Normally we would just put the
3830 // address of GOT into ebx and then call target@PLT. But for tail calls
3831 // ebx would be restored (since ebx is callee saved) before jumping to the callee.
3834 // Note: The actual moving to ECX is done further down.
3835 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3836 if (G && !G->getGlobal()->hasLocalLinkage() &&
3837 G->getGlobal()->hasDefaultVisibility())
3838 Callee = LowerGlobalAddress(Callee, DAG);
3839 else if (isa<ExternalSymbolSDNode>(Callee))
3840 Callee = LowerExternalSymbol(Callee, DAG);
3844 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3845 // From AMD64 ABI document:
3846 // For calls that may call functions that use varargs or stdargs
3847 // (prototype-less calls or calls to functions containing ellipsis (...) in
3848 // the declaration) %al is used as hidden argument to specify the number
3849 // of SSE registers used. The contents of %al do not need to match exactly
3850 // the number of registers, but must be an upper bound on the number of SSE
3851 // registers used and be in the range 0 - 8 inclusive.
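// (Illustrative) e.g. a variadic call that passes two doubles in XMM0/XMM1 may
// set %al to 2, or to any upper bound up to 8.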
3853 // Count the number of XMM registers allocated.
3854 static const MCPhysReg XMMArgRegs[] = {
3855 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3856 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3858 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3859 assert((Subtarget.hasSSE1() || !NumXMMRegs)
3860 && "SSE registers cannot be used when SSE is disabled");
3862 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3863 DAG.getConstant(NumXMMRegs, dl,
3867 if (isVarArg && IsMustTail) {
3868 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3869 for (const auto &F : Forwards) {
3870 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3871 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3875 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3876 // don't need this because the eligibility check rejects calls that require
3877 // shuffling arguments passed in memory.
3878 if (!IsSibcall && isTailCall) {
3879 // Force all the incoming stack arguments to be loaded from the stack
3880 // before any new outgoing arguments are stored to the stack, because the
3881 // outgoing stack slots may alias the incoming argument stack slots, and
3882 // the alias isn't otherwise explicit. This is slightly more conservative
3883 // than necessary, because it means that each store effectively depends
3884 // on every argument instead of just those arguments it would clobber.
3885 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3887 SmallVector<SDValue, 8> MemOpChains2;
3890 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
3892 CCValAssign &VA = ArgLocs[I];
3894 if (VA.isRegLoc()) {
3895 if (VA.needsCustom()) {
3896 assert((CallConv == CallingConv::X86_RegCall) &&
3897 "Expecting custom case only in regcall calling convention");
3898 // This means that we are in a special case where one argument was
3899 // passed through two register locations; skip the next location.
3906 assert(VA.isMemLoc());
3907 SDValue Arg = OutVals[OutsIndex];
3908 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
3909 // Skip inalloca arguments. They don't require any work.
3910 if (Flags.isInAlloca())
3912 // Create frame index.
3913 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3914 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3915 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
3916 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3918 if (Flags.isByVal()) {
3919 // Copy relative to framepointer.
3920 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
3921 if (!StackPtr.getNode())
3922 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3923 getPointerTy(DAG.getDataLayout()));
3924 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3927 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3931 // Store relative to framepointer.
3932 MemOpChains2.push_back(DAG.getStore(
3933 ArgChain, dl, Arg, FIN,
3934 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
3938 if (!MemOpChains2.empty())
3939 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3941 // Store the return address to the appropriate stack slot.
3942 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3943 getPointerTy(DAG.getDataLayout()),
3944 RegInfo->getSlotSize(), FPDiff, dl);
3947 // Build a sequence of copy-to-reg nodes chained together with token chain
3948 // and flag operands which copy the outgoing args into registers.
3950 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3951 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3952 RegsToPass[i].second, InFlag);
3953 InFlag = Chain.getValue(1);
3956 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3957 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3958 // In the 64-bit large code model, we have to make all calls
3959 // through a register, since the call instruction's 32-bit
3960 // pc-relative offset may not be large enough to hold the whole address.
3962 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
3963 Callee->getOpcode() == ISD::ExternalSymbol) {
3964 // Lower direct calls to global addresses and external symbols. Setting
3965 // ForCall to true here has the effect of removing WrapperRIP when possible
3966 // to allow direct calls to be selected without first materializing the
3967 // address into a register.
3968 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
3969 } else if (Subtarget.isTarget64BitILP32() &&
3970 Callee->getValueType(0) == MVT::i32) {
3971 // Zero-extend the 32-bit Callee address into a 64-bit one according to the x32 ABI.
3972 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3975 // Returns a chain & a flag for retval copy to use.
3976 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3977 SmallVector<SDValue, 8> Ops;
3979 if (!IsSibcall && isTailCall) {
3980 Chain = DAG.getCALLSEQ_END(Chain,
3981 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
3982 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
3983 InFlag = Chain.getValue(1);
3986 Ops.push_back(Chain);
3987 Ops.push_back(Callee);
3990 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
3992 // Add argument registers to the end of the list so that they are known live into the call.
3994 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3995 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3996 RegsToPass[i].second.getValueType()));
3998 // Add a register mask operand representing the call-preserved registers.
3999 // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists) then we
4000 // use the X86_INTR calling convention because it has the same CSR mask
4001 // (same preserved registers).
4002 const uint32_t *Mask = RegInfo->getCallPreservedMask(
4003 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4004 assert(Mask && "Missing call preserved mask for calling convention");
4006 // If this is an invoke in a 32-bit function using a funclet-based
4007 // personality, assume the function clobbers all registers. If an exception
4008 // is thrown, the runtime will not restore CSRs.
4009 // FIXME: Model this more precisely so that we can register allocate across
4010 // the normal edge and spill and fill across the exceptional edge.
4011 if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
4012 const Function &CallerFn = MF.getFunction();
4013 EHPersonality Pers =
4014 CallerFn.hasPersonalityFn()
4015 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4016 : EHPersonality::Unknown;
4017 if (isFuncletEHPersonality(Pers))
4018 Mask = RegInfo->getNoPreservedMask();
4021 // Define a new register mask from the existing mask.
4022 uint32_t *RegMask = nullptr;
4024 // In some calling conventions we need to remove the used physical registers
4025 // from the reg mask.
4026 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4027 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4029 // Allocate a new Reg Mask and copy Mask.
4030 RegMask = MF.allocateRegMask();
4031 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4032 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4034 // Make sure all sub registers of the argument registers are reset in the RegMask.
4036 for (auto const &RegPair : RegsToPass)
4037 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4038 SubRegs.isValid(); ++SubRegs)
4039 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4041 // Create the RegMask Operand according to our updated mask.
4042 Ops.push_back(DAG.getRegisterMask(RegMask));
4044 // Create the RegMask Operand according to the static mask.
4045 Ops.push_back(DAG.getRegisterMask(Mask));
4048 if (InFlag.getNode())
4049 Ops.push_back(InFlag);
4053 //// If this is the first return lowered for this function, add the regs
4054 //// to the liveout set for the function.
4055 // This isn't right, although it's probably harmless on x86; liveouts
4056 // should be computed from returns not tail calls. Consider a void
4057 // function making a tail call to a function returning int.
4058 MF.getFrameInfo().setHasTailCall();
4059 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4060 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4064 if (HasNoCfCheck && IsCFProtectionSupported) {
4065 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4067 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4069 InFlag = Chain.getValue(1);
4070 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4072 // Save heapallocsite metadata.
4074 if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
4075 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4077 // Create the CALLSEQ_END node.
4078 unsigned NumBytesForCalleeToPop;
4079 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4080 DAG.getTarget().Options.GuaranteedTailCallOpt))
4081 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4082 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4083 !Subtarget.getTargetTriple().isOSMSVCRT() &&
4084 SR == StackStructReturn)
4085 // If this is a call to a struct-return function, the callee
4086 // pops the hidden struct pointer, so we have to push it back.
4087 // This is common for Darwin/X86, Linux & Mingw32 targets.
4088 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4089 NumBytesForCalleeToPop = 4;
4091 NumBytesForCalleeToPop = 0; // Callee pops nothing.
4093 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
4094 // No need to reset the stack after the call if the call doesn't return. To
4095 // make the MI verifier happy, we'll pretend the callee does it for us.
4096 NumBytesForCalleeToPop = NumBytes;
4099 // Returns a flag for retval copy to use.
4101 Chain = DAG.getCALLSEQ_END(Chain,
4102 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4103 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4106 InFlag = Chain.getValue(1);
4109 // Handle result values, copying them out of physregs into vregs that we return.
4111 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4115 //===----------------------------------------------------------------------===//
4116 // Fast Calling Convention (tail call) implementation
4117 //===----------------------------------------------------------------------===//
4119 // Like stdcall, the callee cleans up the arguments, except that ECX is
4120 // reserved for storing the address of the tail-called function. Only 2 registers
4121 // are free for argument passing (inreg). Tail call optimization is performed when:
4123 // * tailcallopt is enabled
4124 // * caller/callee are fastcc
4125 // On the X86_64 architecture with GOT-style position independent code, only
4126 // local (within-module) calls are supported at the moment.
4127 // To keep the stack aligned according to the platform ABI, the function
4128 // GetAlignedArgumentStackSize ensures that the argument delta is always a
4129 // multiple of the stack alignment. (Dynamic linkers need this; Darwin's dyld
4130 // is an example.) If a tail-called callee has more arguments than the caller,
4131 // the caller needs to make sure that there is room to move the RETADDR to. This
4132 // is achieved by reserving an area the size of the argument delta right after
4133 // the original RETADDR, but before the saved frame pointer or the spilled
4134 // registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
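// (Illustrative sketch of the resulting layout, higher addresses first:)
//   incoming args | original RETADDR | [RETADDR move area] | saved frame
//   pointer / spilled callee-saved registers | locals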
4146 /// Align the stack size, e.g. to 16n + 12 for a 16-byte alignment requirement.
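/// For example (illustrative, 16-byte alignment, 4-byte slot size): a StackSize
/// of 20 becomes 28 (16*1 + 12) and a StackSize of 30 becomes 44 (16*2 + 12), so
/// that pushing the return address restores 16-byte alignment.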
4149 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
4150 SelectionDAG& DAG) const {
4151 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4152 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
4153 unsigned StackAlignment = TFI.getStackAlignment();
4154 uint64_t AlignMask = StackAlignment - 1;
4155 int64_t Offset = StackSize;
4156 unsigned SlotSize = RegInfo->getSlotSize();
4157 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
4158 // The misalignment is at most (StackAlignment - SlotSize), so just add the difference.
4159 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
4161 // Mask out the lower bits and add the stack alignment once plus (StackAlignment - SlotSize).
4162 Offset = ((~AlignMask) & Offset) + StackAlignment +
4163 (StackAlignment-SlotSize);
4168 /// Return true if the given stack call argument is already available in the
4169 /// same (relative) position in the caller's incoming argument stack.
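/// For example (illustrative): if the caller received an argument at incoming
/// stack offset 8 and the tail call passes that same value at outgoing offset 8
/// with matching size and extension flags, the argument can be left in place.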
4171 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4172 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4173 const X86InstrInfo *TII, const CCValAssign &VA) {
4174 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4177 // Look through nodes that don't alter the bits of the incoming value.
4178 unsigned Op = Arg.getOpcode();
4179 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4180 Arg = Arg.getOperand(0);
4183 if (Op == ISD::TRUNCATE) {
4184 const SDValue &TruncInput = Arg.getOperand(0);
4185 if (TruncInput.getOpcode() == ISD::AssertZext &&
4186 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4187 Arg.getValueType()) {
4188 Arg = TruncInput.getOperand(0);
4196 if (Arg.getOpcode() == ISD::CopyFromReg) {
4197 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4198 if (!TargetRegisterInfo::isVirtualRegister(VR))
4200 MachineInstr *Def = MRI->getVRegDef(VR);
4203 if (!Flags.isByVal()) {
4204 if (!TII->isLoadFromStackSlot(*Def, FI))
4207 unsigned Opcode = Def->getOpcode();
4208 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4209 Opcode == X86::LEA64_32r) &&
4210 Def->getOperand(1).isFI()) {
4211 FI = Def->getOperand(1).getIndex();
4212 Bytes = Flags.getByValSize();
4216 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4217 if (Flags.isByVal())
4218 // ByVal argument is passed in as a pointer but it's now being
4219 // dereferenced. e.g.
4220 // define @foo(%struct.X* %A) {
4221 // tail call @bar(%struct.X* byval %A)
4224 SDValue Ptr = Ld->getBasePtr();
4225 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4228 FI = FINode->getIndex();
4229 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4230 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4231 FI = FINode->getIndex();
4232 Bytes = Flags.getByValSize();
4236 assert(FI != INT_MAX);
4237 if (!MFI.isFixedObjectIndex(FI))
4240 if (Offset != MFI.getObjectOffset(FI))
4243 // If this is not byval, check that the argument stack object is immutable.
4244 // inalloca and argument copy elision can create mutable argument stack
4245 // objects. Byval objects can be mutated, but a byval call intends to pass the
4247 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4250 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4251 // If the argument location is wider than the argument type, check that any
4252 // extension flags match.
4253 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4254 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4259 return Bytes == MFI.getObjectSize(FI);
4262 /// Check whether the call is eligible for tail call optimization. Targets
4263 /// that want to do tail call optimization should implement this function.
4264 bool X86TargetLowering::IsEligibleForTailCallOptimization(
4265 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4266 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4267 const SmallVectorImpl<ISD::OutputArg> &Outs,
4268 const SmallVectorImpl<SDValue> &OutVals,
4269 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4270 if (!mayTailCallThisCC(CalleeCC))
4273 // If -tailcallopt is specified, make fastcc functions tail-callable.
4274 MachineFunction &MF = DAG.getMachineFunction();
4275 const Function &CallerF = MF.getFunction();
4277 // If the function return type is x86_fp80 and the callee return type is not,
4278 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4279 // perform a tailcall optimization here.
4280 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4283 CallingConv::ID CallerCC = CallerF.getCallingConv();
4284 bool CCMatch = CallerCC == CalleeCC;
4285 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4286 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4288 // Win64 functions have extra shadow space for argument homing. Don't do the
4289 // sibcall if the caller and callee have mismatched expectations for this space.
4291 if (IsCalleeWin64 != IsCallerWin64)
4294 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
4295 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4300 // Look for obvious safe cases to perform tail call optimization that do not
4301 // require ABI changes. This is what gcc calls sibcall.
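// (A sibling call reuses the caller's stack frame, so the call can be emitted
// as a plain jump with no stack adjustment on either side.)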
4303 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4304 // emit a special epilogue.
4305 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4306 if (RegInfo->needsStackRealignment(MF))
4309 // Also avoid sibcall optimization if either caller or callee uses struct
4310 // return semantics.
4311 if (isCalleeStructRet || isCallerStructRet)
4314 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
4316 LLVMContext &C = *DAG.getContext();
4317 if (isVarArg && !Outs.empty()) {
4318 // Optimizing for varargs on Win64 is unlikely to be safe without
4319 // additional testing.
4320 if (IsCalleeWin64 || IsCallerWin64)
4323 SmallVector<CCValAssign, 16> ArgLocs;
4324 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4326 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4327 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4328 if (!ArgLocs[i].isRegLoc())
4332 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4333 // stack. Therefore, if it's not used by the call it is not safe to optimize
4334 // this into a sibcall.
4335 bool Unused = false;
4336 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4343 SmallVector<CCValAssign, 16> RVLocs;
4344 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4345 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4346 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4347 CCValAssign &VA = RVLocs[i];
4348 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4353 // Check that the call results are passed in the same way.
4354 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4355 RetCC_X86, RetCC_X86))
4357 // The callee has to preserve all registers the caller needs to preserve.
4358 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4359 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4361 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4362 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4366 unsigned StackArgsSize = 0;
4368 // If the callee takes no arguments then go on to check the results of the call.
4370 if (!Outs.empty()) {
4371 // Check if stack adjustment is needed. For now, do not do this if any
4372 // argument is passed on the stack.
4373 SmallVector<CCValAssign, 16> ArgLocs;
4374 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4376 // Allocate shadow area for Win64
4378 CCInfo.AllocateStack(32, 8);
4380 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4381 StackArgsSize = CCInfo.getNextStackOffset();
4383 if (CCInfo.getNextStackOffset()) {
4384 // Check if the arguments are already laid out in the right way as
4385 // the caller's fixed stack objects.
4386 MachineFrameInfo &MFI = MF.getFrameInfo();
4387 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4388 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4389 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4390 CCValAssign &VA = ArgLocs[i];
4391 SDValue Arg = OutVals[i];
4392 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4393 if (VA.getLocInfo() == CCValAssign::Indirect)
4395 if (!VA.isRegLoc()) {
4396 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4403 bool PositionIndependent = isPositionIndependent();
4404 // If the tailcall address may be in a register, then make sure it's
4405 // possible to register allocate for it. In 32-bit, the call address can
4406 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4407 // callee-saved registers are restored. These happen to be the same
4408 // registers used to pass 'inreg' arguments so watch out for those.
4409 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4410 !isa<ExternalSymbolSDNode>(Callee)) ||
4411 PositionIndependent)) {
4412 unsigned NumInRegs = 0;
4413 // In PIC we need an extra register to formulate the address computation for the callee.
4415 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4417 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4418 CCValAssign &VA = ArgLocs[i];
4421 unsigned Reg = VA.getLocReg();
4424 case X86::EAX: case X86::EDX: case X86::ECX:
4425 if (++NumInRegs == MaxInRegs)
4432 const MachineRegisterInfo &MRI = MF.getRegInfo();
4433 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4437 bool CalleeWillPop =
4438 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4439 MF.getTarget().Options.GuaranteedTailCallOpt);
4441 if (unsigned BytesToPop =
4442 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4443 // If we have bytes to pop, the callee must pop them.
4444 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4445 if (!CalleePopMatches)
4447 } else if (CalleeWillPop && StackArgsSize > 0) {
4448 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4456 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4457 const TargetLibraryInfo *libInfo) const {
4458 return X86::createFastISel(funcInfo, libInfo);
4461 //===----------------------------------------------------------------------===//
4462 // Other Lowering Hooks
4463 //===----------------------------------------------------------------------===//
4465 static bool MayFoldLoad(SDValue Op) {
4466 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4469 static bool MayFoldIntoStore(SDValue Op) {
4470 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4473 static bool MayFoldIntoZeroExtend(SDValue Op) {
4474 if (Op.hasOneUse()) {
4475 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4476 return (ISD::ZERO_EXTEND == Opcode);
4481 static bool isTargetShuffle(unsigned Opcode) {
4483 default: return false;
4484 case X86ISD::BLENDI:
4485 case X86ISD::PSHUFB:
4486 case X86ISD::PSHUFD:
4487 case X86ISD::PSHUFHW:
4488 case X86ISD::PSHUFLW:
4490 case X86ISD::INSERTPS:
4491 case X86ISD::EXTRQI:
4492 case X86ISD::INSERTQI:
4493 case X86ISD::PALIGNR:
4494 case X86ISD::VSHLDQ:
4495 case X86ISD::VSRLDQ:
4496 case X86ISD::MOVLHPS:
4497 case X86ISD::MOVHLPS:
4498 case X86ISD::MOVSHDUP:
4499 case X86ISD::MOVSLDUP:
4500 case X86ISD::MOVDDUP:
4503 case X86ISD::UNPCKL:
4504 case X86ISD::UNPCKH:
4505 case X86ISD::VBROADCAST:
4506 case X86ISD::VPERMILPI:
4507 case X86ISD::VPERMILPV:
4508 case X86ISD::VPERM2X128:
4509 case X86ISD::SHUF128:
4510 case X86ISD::VPERMIL2:
4511 case X86ISD::VPERMI:
4512 case X86ISD::VPPERM:
4513 case X86ISD::VPERMV:
4514 case X86ISD::VPERMV3:
4515 case X86ISD::VZEXT_MOVL:
4520 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4522 default: return false;
4524 case X86ISD::PSHUFB:
4525 case X86ISD::VPERMILPV:
4526 case X86ISD::VPERMIL2:
4527 case X86ISD::VPPERM:
4528 case X86ISD::VPERMV:
4529 case X86ISD::VPERMV3:
4531 // 'Faux' Target Shuffles.
4539 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4540 MachineFunction &MF = DAG.getMachineFunction();
4541 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4542 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4543 int ReturnAddrIndex = FuncInfo->getRAIndex();
4545 if (ReturnAddrIndex == 0) {
4546 // Set up a frame object for the return address.
4547 unsigned SlotSize = RegInfo->getSlotSize();
4548 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4551 FuncInfo->setRAIndex(ReturnAddrIndex);
4554 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4557 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4558 bool hasSymbolicDisplacement) {
4559 // Offset should fit into 32 bit immediate field.
4560 if (!isInt<32>(Offset))
4563 // If we don't have a symbolic displacement - we don't have any extra restrictions.
4565 if (!hasSymbolicDisplacement)
4568 // FIXME: Some tweaks might be needed for medium code model.
4569 if (M != CodeModel::Small && M != CodeModel::Kernel)
4572 // For the small code model we assume that the latest object is 16MB below the
4573 // end of the 31-bit address boundary. We may also accept pretty large negative
4574 // constants, knowing that all objects are in the positive half of the address space.
4575 if (M == CodeModel::Small && Offset < 16*1024*1024)
4578 // For the kernel code model we know that all objects reside in the negative half
4579 // of the 32-bit address space. We must not accept negative offsets, since they
4580 // could push an address out of range, but we may accept pretty large positive ones.
4581 if (M == CodeModel::Kernel && Offset >= 0)
4587 /// Determines whether the callee is required to pop its own arguments.
4588 /// Callee pop is necessary to support tail calls.
4589 bool X86::isCalleePop(CallingConv::ID CallingConv,
4590 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4591 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4592 // can guarantee TCO.
4593 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4596 switch (CallingConv) {
4599 case CallingConv::X86_StdCall:
4600 case CallingConv::X86_FastCall:
4601 case CallingConv::X86_ThisCall:
4602 case CallingConv::X86_VectorCall:
4607 /// Return true if the condition is an unsigned comparison operation.
4608 static bool isX86CCUnsigned(unsigned X86CC) {
4611 llvm_unreachable("Invalid integer condition!");
4627 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4628 switch (SetCCOpcode) {
4629 default: llvm_unreachable("Invalid integer condition!");
4630 case ISD::SETEQ: return X86::COND_E;
4631 case ISD::SETGT: return X86::COND_G;
4632 case ISD::SETGE: return X86::COND_GE;
4633 case ISD::SETLT: return X86::COND_L;
4634 case ISD::SETLE: return X86::COND_LE;
4635 case ISD::SETNE: return X86::COND_NE;
4636 case ISD::SETULT: return X86::COND_B;
4637 case ISD::SETUGT: return X86::COND_A;
4638 case ISD::SETULE: return X86::COND_BE;
4639 case ISD::SETUGE: return X86::COND_AE;
4643 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4644 /// condition code, returning the condition code and the LHS/RHS of the
4645 /// comparison to make.
4646 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4647 bool isFP, SDValue &LHS, SDValue &RHS,
4648 SelectionDAG &DAG) {
4650 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4651 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4652 // X > -1 -> X == 0, jump !sign.
4653 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4654 return X86::COND_NS;
4656 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4657 // X < 0 -> X == 0, jump on sign.
4660 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
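// X < 1 -> X <= 0, so compare against zero and use the signed LE condition.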
4662 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4663 return X86::COND_LE;
4667 return TranslateIntegerX86CC(SetCCOpcode);
4670 // First determine if it is required or is profitable to flip the operands.
4672 // If LHS is a foldable load, but RHS is not, flip the condition.
4673 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4674 !ISD::isNON_EXTLoad(RHS.getNode())) {
4675 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4676 std::swap(LHS, RHS);
4679 switch (SetCCOpcode) {
4685 std::swap(LHS, RHS);
4689 // On a floating point condition, the flags are set as follows:
4690 // ZF | PF | CF | result
4691 //  0 |  0 |  0 | X > Y
4692 //  0 |  0 |  1 | X < Y
4693 //  1 |  0 |  0 | X == Y
4694 //  1 |  1 |  1 | unordered
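// Because COMIS*/UCOMIS* report the result in CF/ZF/PF as shown above, FP
// comparisons are lowered to the *unsigned* integer condition codes
// (A/AE/B/BE) rather than the signed ones.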
4695 switch (SetCCOpcode) {
4696 default: llvm_unreachable("Condcode should be pre-legalized away");
4698 case ISD::SETEQ: return X86::COND_E;
4699 case ISD::SETOLT: // flipped
4701 case ISD::SETGT: return X86::COND_A;
4702 case ISD::SETOLE: // flipped
4704 case ISD::SETGE: return X86::COND_AE;
4705 case ISD::SETUGT: // flipped
4707 case ISD::SETLT: return X86::COND_B;
4708 case ISD::SETUGE: // flipped
4710 case ISD::SETLE: return X86::COND_BE;
4712 case ISD::SETNE: return X86::COND_NE;
4713 case ISD::SETUO: return X86::COND_P;
4714 case ISD::SETO: return X86::COND_NP;
4716 case ISD::SETUNE: return X86::COND_INVALID;
4720 /// Is there a floating point cmov for the specific X86 condition code?
4721 /// Current x86 ISA includes the following FP cmov instructions:
4722 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4723 static bool hasFPCMov(unsigned X86CC) {
4740 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4742 MachineFunction &MF,
4743 unsigned Intrinsic) const {
4745 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4749 Info.flags = MachineMemOperand::MONone;
4752 switch (IntrData->Type) {
4753 case TRUNCATE_TO_MEM_VI8:
4754 case TRUNCATE_TO_MEM_VI16:
4755 case TRUNCATE_TO_MEM_VI32: {
4756 Info.opc = ISD::INTRINSIC_VOID;
4757 Info.ptrVal = I.getArgOperand(0);
4758 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4759 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4760 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4762 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4763 ScalarVT = MVT::i16;
4764 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4765 ScalarVT = MVT::i32;
4767 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4769 Info.flags |= MachineMemOperand::MOStore;
4774 Info.opc = ISD::INTRINSIC_W_CHAIN;
4775 Info.ptrVal = nullptr;
4776 MVT DataVT = MVT::getVT(I.getType());
4777 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4778 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4779 IndexVT.getVectorNumElements());
4780 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4782 Info.flags |= MachineMemOperand::MOLoad;
4786 Info.opc = ISD::INTRINSIC_VOID;
4787 Info.ptrVal = nullptr;
4788 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4789 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4790 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4791 IndexVT.getVectorNumElements());
4792 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4794 Info.flags |= MachineMemOperand::MOStore;
4804 /// Returns true if the target can instruction select the
4805 /// specified FP immediate natively. If false, the legalizer will
4806 /// materialize the FP immediate as a load from a constant pool.
4807 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4808 bool ForCodeSize) const {
4809 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
4810 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
4816 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
4817 ISD::LoadExtType ExtTy,
4819 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
4820 // relocation target a movq or addq instruction: don't let the load shrink.
4821 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
4822 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
4823 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
4824 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
4826 // If this is (1) an AVX vector load with (2) multiple uses and (3) all of
4827 // those uses are extracted directly into a store, then the extract + store
4828 // can be store-folded. Therefore, it's probably not worth splitting the load.
4829 EVT VT = Load->getValueType(0);
4830 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
4831 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
4832 // Skip uses of the chain value. Result 0 of the node is the load value.
4833 if (UI.getUse().getResNo() != 0)
4836 // If this use is not an extract + store, it's probably worth splitting.
4837 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
4838 UI->use_begin()->getOpcode() != ISD::STORE)
4841 // All non-chain uses are extract + store.
4848 /// Returns true if it is beneficial to convert a load of a constant
4849 /// to just the constant itself.
4850 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
4852 assert(Ty->isIntegerTy());
4854 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4855 if (BitSize == 0 || BitSize > 64)
4860 bool X86TargetLowering::reduceSelectOfFPConstantLoads(bool IsFPSetCC) const {
4861 // If we are using XMM registers in the ABI and the condition of the select is
4862 // a floating-point compare and we have blendv or conditional move, then it is
4863 // cheaper to select instead of doing a cross-register move and creating a
4864 // load that depends on the compare result.
4865 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
4868 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
4869 // TODO: It might be a win to ease or lift this restriction, but the generic
4870 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
4871 if (VT.isVector() && Subtarget.hasAVX512())
4877 bool X86TargetLowering::decomposeMulByConstant(EVT VT, SDValue C) const {
4878 // TODO: We handle scalars using custom code, but generic combining could make
4879 // that unnecessary.
4881 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
4884 // If vector multiply is legal, assume that's faster than shl + add/sub.
4885 // TODO: Multiply is a complex op with higher latency and lower throughput in
4886 // most implementations, so this check could be loosened based on type
4887 // and/or a CPU attribute.
4888 if (isOperationLegal(ISD::MUL, VT))
4891 // shl+add, shl+sub, shl+add+neg
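// e.g. x*9 -> (x << 3) + x, x*7 -> (x << 3) - x, x*-7 -> x - (x << 3),
// and x*-9 -> -((x << 3) + x).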
4892 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
4893 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
4896 bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
4897 bool IsSigned) const {
4898 // Unsigned f80 FP_TO_INT is more efficient using Strict code if FCMOV is available.
4899 return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
4902 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
4903 unsigned Index) const {
4904 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
4907 // Mask vectors support all subregister combinations and operations that
4908 // extract half of vector.
4909 if (ResVT.getVectorElementType() == MVT::i1)
4910 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
4911 (Index == ResVT.getVectorNumElements()));
4913 return (Index % ResVT.getVectorNumElements()) == 0;
4916 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
4917 unsigned Opc = VecOp.getOpcode();
4919 // Assume target opcodes can't be scalarized.
4920 // TODO - do we have any exceptions?
4921 if (Opc >= ISD::BUILTIN_OP_END)
4924 // If the vector op is not supported, try to convert to scalar.
4925 EVT VecVT = VecOp.getValueType();
4926 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
4929 // If the vector op is supported, but the scalar op is not, the transform may
4930 // not be worthwhile.
4931 EVT ScalarVT = VecVT.getScalarType();
4932 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
4935 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
4936 // TODO: Allow vectors?
4939 return VT.isSimple() || !isOperationExpand(Opcode, VT);
4942 bool X86TargetLowering::isCheapToSpeculateCttz() const {
4943 // Speculate cttz only if we can directly use TZCNT.
4944 return Subtarget.hasBMI();
4947 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
4948 // Speculate ctlz only if we can directly use LZCNT.
4949 return Subtarget.hasLZCNT();
4952 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
4953 const SelectionDAG &DAG,
4954 const MachineMemOperand &MMO) const {
4955 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
4956 BitcastVT.getVectorElementType() == MVT::i1)
4959 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
4962 // If both types are legal vectors, it's always ok to convert them.
4963 if (LoadVT.isVector() && BitcastVT.isVector() &&
4964 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
4967 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
4970 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
4971 const SelectionDAG &DAG) const {
4972 // Do not merge to float value size (128 bits) if no implicit
4973 // float attribute is set.
4974 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
4975 Attribute::NoImplicitFloat);
4978 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
4979 return (MemVT.getSizeInBits() <= MaxIntSize);
4981 // Make sure we don't merge greater than our preferred vector width.
4983 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
4988 bool X86TargetLowering::isCtlzFast() const {
4989 return Subtarget.hasFastLZCNT();
4992 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
4993 const Instruction &AndI) const {
4997 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
4998 EVT VT = Y.getValueType();
5003 if (!Subtarget.hasBMI())
5006 // There are only 32-bit and 64-bit forms for 'andn'.
5007 if (VT != MVT::i32 && VT != MVT::i64)
5010 return !isa<ConstantSDNode>(Y);
5013 bool X86TargetLowering::hasAndNot(SDValue Y) const {
5014 EVT VT = Y.getValueType();
5017 return hasAndNotCompare(Y);
5021 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5024 if (VT == MVT::v4i32)
5027 return Subtarget.hasSSE2();
5030 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5031 const SDNode *N, CombineLevel Level) const {
5032 assert(((N->getOpcode() == ISD::SHL &&
5033 N->getOperand(0).getOpcode() == ISD::SRL) ||
5034 (N->getOpcode() == ISD::SRL &&
5035 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5036 "Expected shift-shift mask");
5037 EVT VT = N->getValueType(0);
5038 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5039 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5040 // Only fold if the shift values are equal - so it folds to AND.
5041 // TODO - we should fold if either is a non-uniform vector but we don't do
5042 // the fold for non-splats yet.
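// e.g. (srl (shl X, C), C) -> (and X, (-1 >> C)) and
//      (shl (srl X, C), C) -> (and X, (-1 << C)) when the shift amounts match.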
5043 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5045 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5048 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5049 EVT VT = Y.getValueType();
5051 // For vectors, we don't have a preference, but we probably want a mask.
5055 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5056 if (VT == MVT::i64 && !Subtarget.is64Bit())
5062 bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5064 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5065 !Subtarget.isOSWindows())
5070 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5071 // Any legal vector type can be splatted more efficiently than
5072 // loading/spilling from memory.
5073 return isTypeLegal(VT);
5076 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5077 MVT VT = MVT::getIntegerVT(NumBits);
5078 if (isTypeLegal(VT))
5081 // PMOVMSKB can handle this.
5082 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5085 // VPMOVMSKB can handle this.
5086 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5089 // TODO: Allow 64-bit type for 32-bit target.
5090 // TODO: 512-bit types should be allowed, but make sure that those
5091 // cases are handled in combineVectorSizedSetCCEquality().
5093 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5096 /// Val is the undef sentinel value or equal to the specified value.
5097 static bool isUndefOrEqual(int Val, int CmpVal) {
5098 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5101 /// Val is either the undef or zero sentinel value.
5102 static bool isUndefOrZero(int Val) {
5103 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5106 /// Return true if every element in Mask, beginning from position Pos and ending
5107 /// in Pos+Size is the undef sentinel value.
5108 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5109 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
5110 if (Mask[i] != SM_SentinelUndef)
5115 /// Return true if the mask creates a vector whose lower half is undefined.
5116 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5117 unsigned NumElts = Mask.size();
5118 return isUndefInRange(Mask, 0, NumElts / 2);
5121 /// Return true if the mask creates a vector whose upper half is undefined.
5122 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5123 unsigned NumElts = Mask.size();
5124 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5127 /// Return true if Val falls within the specified range [Low, Hi).
5128 static bool isInRange(int Val, int Low, int Hi) {
5129 return (Val >= Low && Val < Hi);
5132 /// Return true if the value of any element in Mask falls within the specified range [Low, Hi).
5134 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5136 if (isInRange(M, Low, Hi))
5141 /// Return true if Val is undef or if its value falls within the
5142 /// specified range [Low, Hi).
5143 static bool isUndefOrInRange(int Val, int Low, int Hi) {
5144 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5147 /// Return true if every element in Mask is undef or if its value
5148 /// falls within the specified range [Low, Hi).
5149 static bool isUndefOrInRange(ArrayRef<int> Mask,
5152 if (!isUndefOrInRange(M, Low, Hi))
5157 /// Return true if Val is undef, zero or if its value falls within the
5158 /// specified range [Low, Hi).
5159 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5160 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5163 /// Return true if every element in Mask is undef, zero or if its value
5164 /// falls within the specified range [Low, Hi).
5165 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5167 if (!isUndefOrZeroOrInRange(M, Low, Hi))
5172 /// Return true if every element in Mask, beginning
5173 /// from position Pos and ending in Pos + Size, falls within the specified
5174 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5175 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5176 unsigned Size, int Low, int Step = 1) {
5177 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5178 if (!isUndefOrEqual(Mask[i], Low))
5183 /// Return true if every element in Mask, beginning
5184 /// from position Pos and ending in Pos+Size, falls within the specified
5185 /// sequential range [Low, Low+Size), or is undef or is zero.
5186 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5187 unsigned Size, int Low) {
5188 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
5189 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5194 /// Return true if every element in Mask, beginning
5195 /// from position Pos and ending in Pos+Size is undef or is zero.
5196 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5198 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
5199 if (!isUndefOrZero(Mask[i]))
5204 /// Helper function to test whether a shuffle mask could be
5205 /// simplified by widening the elements being shuffled.
5207 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5208 /// leaves it in an unspecified state.
5210 /// NOTE: This must handle normal vector shuffle masks and *target* vector
5211 /// shuffle masks. The latter have the special property of a '-2' representing
5212 /// a zero-ed lane of a vector.
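/// For example, the v4i32 mask <0,1,6,7> widens to the v2i64 mask <0,3>,
/// while <0,2,4,6> cannot be widened because its element pairs are not
/// adjacent and pair-aligned.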
5213 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5214 SmallVectorImpl<int> &WidenedMask) {
5215 WidenedMask.assign(Mask.size() / 2, 0);
5216 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5218 int M1 = Mask[i + 1];
5220 // If both elements are undef, it's trivial.
5221 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5222 WidenedMask[i / 2] = SM_SentinelUndef;
5226 // Check for an undef mask and a mask value properly aligned to fit with
5227 // a pair of values. If we find such a case, use the non-undef mask's value.
5228 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5229 WidenedMask[i / 2] = M1 / 2;
5232 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5233 WidenedMask[i / 2] = M0 / 2;
5237 // When zeroing, we need to spread the zeroing across both lanes to widen.
5238 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5239 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5240 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5241 WidenedMask[i / 2] = SM_SentinelZero;
5247 // Finally check if the two mask values are adjacent and aligned with
5249 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5250 WidenedMask[i / 2] = M0 / 2;
5254 // Otherwise we can't safely widen the elements used in this shuffle.
5257 assert(WidenedMask.size() == Mask.size() / 2 &&
5258 "Incorrect size of mask after widening the elements!");
5263 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5264 const APInt &Zeroable,
5265 SmallVectorImpl<int> &WidenedMask) {
5266 SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
5267 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
5268 if (TargetMask[i] == SM_SentinelUndef)
5271 TargetMask[i] = SM_SentinelZero;
5273 return canWidenShuffleElements(TargetMask, WidenedMask);
5276 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5277 SmallVector<int, 32> WidenedMask;
5278 return canWidenShuffleElements(Mask, WidenedMask);
5281 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
5282 bool X86::isZeroNode(SDValue Elt) {
5283 return isNullConstant(Elt) || isNullFPConstant(Elt);
5286 // Build a vector of constants.
5287 // Use an UNDEF node if MaskElt == -1.
5288 // Split 64-bit constants in the 32-bit mode.
5289 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5290 const SDLoc &dl, bool IsMask = false) {
5292 SmallVector<SDValue, 32> Ops;
5295 MVT ConstVecVT = VT;
5296 unsigned NumElts = VT.getVectorNumElements();
5297 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5298 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5299 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5303 MVT EltVT = ConstVecVT.getVectorElementType();
5304 for (unsigned i = 0; i < NumElts; ++i) {
5305 bool IsUndef = Values[i] < 0 && IsMask;
5306 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5307 DAG.getConstant(Values[i], dl, EltVT);
5308 Ops.push_back(OpNode);
5310 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5311 DAG.getConstant(0, dl, EltVT));
5313 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5315 ConstsNode = DAG.getBitcast(VT, ConstsNode);
5319 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5320 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5321 assert(Bits.size() == Undefs.getBitWidth() &&
5322 "Unequal constant and undef arrays");
5323 SmallVector<SDValue, 32> Ops;
5326 MVT ConstVecVT = VT;
5327 unsigned NumElts = VT.getVectorNumElements();
5328 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5329 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5330 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5334 MVT EltVT = ConstVecVT.getVectorElementType();
5335 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5337 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5340 const APInt &V = Bits[i];
5341 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5343 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5344 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5345 } else if (EltVT == MVT::f32) {
5346 APFloat FV(APFloat::IEEEsingle(), V);
5347 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5348 } else if (EltVT == MVT::f64) {
5349 APFloat FV(APFloat::IEEEdouble(), V);
5350 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5352 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5356 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5357 return DAG.getBitcast(VT, ConstsNode);
5360 /// Returns a vector of specified type with all zero elements.
5361 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5362 SelectionDAG &DAG, const SDLoc &dl) {
5363 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5364 VT.getVectorElementType() == MVT::i1) &&
5365 "Unexpected vector type");
5367 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5368 // type. This ensures they get CSE'd. But if the integer type is not
5369 // available, use a floating-point +0.0 instead.
5371 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5372 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5373 } else if (VT.getVectorElementType() == MVT::i1) {
5374 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5375 "Unexpected vector type");
5376 Vec = DAG.getConstant(0, dl, VT);
5378 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5379 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5381 return DAG.getBitcast(VT, Vec);
5384 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5385 const SDLoc &dl, unsigned vectorWidth) {
5386 EVT VT = Vec.getValueType();
5387 EVT ElVT = VT.getVectorElementType();
5388 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5389 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5390 VT.getVectorNumElements()/Factor);
5392 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5393 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5394 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5396 // This is the index of the first element of the vectorWidth-bit chunk
5397 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
5398 IdxVal &= ~(ElemsPerChunk - 1);
5400 // If the input is a buildvector just emit a smaller one.
5401 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5402 return DAG.getBuildVector(ResultVT, dl,
5403 Vec->ops().slice(IdxVal, ElemsPerChunk));
5405 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5406 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5409 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5410 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5411 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5412 /// instructions or a simple subregister reference. Idx is an index in the
5413 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5414 /// lowering EXTRACT_VECTOR_ELT operations easier.
5415 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5416 SelectionDAG &DAG, const SDLoc &dl) {
5417 assert((Vec.getValueType().is256BitVector() ||
5418 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5419 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5422 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5423 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5424 SelectionDAG &DAG, const SDLoc &dl) {
5425 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5426 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5429 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5430 SelectionDAG &DAG, const SDLoc &dl,
5431 unsigned vectorWidth) {
5432 assert((vectorWidth == 128 || vectorWidth == 256) &&
5433 "Unsupported vector width");
5434 // Inserting an UNDEF subvector simply yields Result unchanged.
5437 EVT VT = Vec.getValueType();
5438 EVT ElVT = VT.getVectorElementType();
5439 EVT ResultVT = Result.getValueType();
5441 // Insert the relevant vectorWidth bits.
5442 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5443 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5445 // This is the index of the first element of the vectorWidth-bit chunk
5446 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
5447 IdxVal &= ~(ElemsPerChunk - 1);
5449 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5450 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5453 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5454 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5455 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5456 /// simple superregister reference. Idx is an index in the 128 bits
5457 /// we want. It need not be aligned to a 128-bit boundary. That makes
5458 /// lowering INSERT_VECTOR_ELT operations easier.
5459 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5460 SelectionDAG &DAG, const SDLoc &dl) {
5461 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5462 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5465 /// Widen a vector to a larger size with the same scalar type, with the new
5466 /// elements either zero or undef.
5467 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5468 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5470 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5471 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5472 "Unsupported vector widening type");
5473 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5475 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5476 DAG.getIntPtrConstant(0, dl));
5479 /// Widen a vector to a larger size with the same scalar type, with the new
5480 /// elements either zero or undef.
5481 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5482 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5483 const SDLoc &dl, unsigned WideSizeInBits) {
5484 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5485 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5486 "Unsupported vector widening type");
5487 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5488 MVT SVT = Vec.getSimpleValueType().getScalarType();
5489 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5490 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5493 // Helper function to collect subvector ops that are concatenated together,
5494 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
5495 // The subvectors in Ops are guaranteed to be the same type.
5496 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5497 assert(Ops.empty() && "Expected an empty ops vector");
5499 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5500 Ops.append(N->op_begin(), N->op_end());
5504 if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5505 isa<ConstantSDNode>(N->getOperand(2))) {
5506 SDValue Src = N->getOperand(0);
5507 SDValue Sub = N->getOperand(1);
5508 const APInt &Idx = N->getConstantOperandAPInt(2);
5509 EVT VT = Src.getValueType();
5510 EVT SubVT = Sub.getValueType();
5512 // TODO - Handle more general insert_subvector chains.
5513 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5514 Idx == (VT.getVectorNumElements() / 2) &&
5515 Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5516 Src.getOperand(1).getValueType() == SubVT &&
5517 isNullConstant(Src.getOperand(2))) {
5518 Ops.push_back(Src.getOperand(1));
5527 // Helper for splitting operands of an operation to legal target size and
5528 // apply a function on each part.
5529 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5530 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5531 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5532 // The argument Builder is a function that will be applied on each split part:
5533 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
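// A typical invocation looks like the sketch below (the opcode is only an
// illustrative example; Op0/Op1 are hypothetical operands):
//   SplitOpsAndApply(DAG, Subtarget, DL, VT, {Op0, Op1},
//                    [](SelectionDAG &DAG, const SDLoc &DL,
//                       ArrayRef<SDValue> Ops) {
//                      return DAG.getNode(X86ISD::AVG, DL,
//                                         Ops[0].getValueType(), Ops);
//                    });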
5534 template <typename F>
5535 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5536 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5537 F Builder, bool CheckBWI = true) {
5538 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5539 unsigned NumSubs = 1;
5540 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5541 (!CheckBWI && Subtarget.useAVX512Regs())) {
5542 if (VT.getSizeInBits() > 512) {
5543 NumSubs = VT.getSizeInBits() / 512;
5544 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5546 } else if (Subtarget.hasAVX2()) {
5547 if (VT.getSizeInBits() > 256) {
5548 NumSubs = VT.getSizeInBits() / 256;
5549 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5552 if (VT.getSizeInBits() > 128) {
5553 NumSubs = VT.getSizeInBits() / 128;
5554 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5559 return Builder(DAG, DL, Ops);
5561 SmallVector<SDValue, 4> Subs;
5562 for (unsigned i = 0; i != NumSubs; ++i) {
5563 SmallVector<SDValue, 2> SubOps;
5564 for (SDValue Op : Ops) {
5565 EVT OpVT = Op.getValueType();
5566 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5567 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5568 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5570 Subs.push_back(Builder(DAG, DL, SubOps));
5572 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
5575 /// Insert an i1 subvector into an i1 vector.
5576 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5577 const X86Subtarget &Subtarget) {
5580 SDValue Vec = Op.getOperand(0);
5581 SDValue SubVec = Op.getOperand(1);
5582 SDValue Idx = Op.getOperand(2);
5584 if (!isa<ConstantSDNode>(Idx))
5587 // Inserting undef is a nop. We can just return the original vector.
5588 if (SubVec.isUndef())
5591 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5592 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5595 MVT OpVT = Op.getSimpleValueType();
5596 unsigned NumElems = OpVT.getVectorNumElements();
5598 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5600 // Extend to natively supported kshift.
5601 MVT WideOpVT = OpVT;
5602 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5603 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
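// KSHIFT operates on whole mask registers: without DQI the narrowest legal
// shift is on v16i1 (KSHIFTLW/KSHIFTRW); DQI adds the v8i1 (byte) forms.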
5605 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts if needed.
5607 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5608 // May need to promote to a legal type.
5609 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5610 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5612 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5615 MVT SubVecVT = SubVec.getSimpleValueType();
5616 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5618 assert(IdxVal + SubVecNumElems <= NumElems &&
5619 IdxVal % SubVecVT.getSizeInBits() == 0 &&
5620 "Unexpected index value in INSERT_SUBVECTOR");
5622 SDValue Undef = DAG.getUNDEF(WideOpVT);
5625 // Zero lower bits of the Vec
5626 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
5627 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5629 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5630 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5631 // Merge them together, SubVec should be zero extended.
5632 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5633 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5635 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5636 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5639 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5640 Undef, SubVec, ZeroIdx);
5642 if (Vec.isUndef()) {
5643 assert(IdxVal != 0 && "Unexpected index");
5644 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5645 DAG.getConstant(IdxVal, dl, MVT::i8));
5646 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5649 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5650 assert(IdxVal != 0 && "Unexpected index");
5651 NumElems = WideOpVT.getVectorNumElements();
5652 unsigned ShiftLeft = NumElems - SubVecNumElems;
5653 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5654 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5655 DAG.getConstant(ShiftLeft, dl, MVT::i8));
5656 if (ShiftRight != 0)
5657 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5658 DAG.getConstant(ShiftRight, dl, MVT::i8));
5659 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5662 // Simple case when we put subvector in the upper part
5663 if (IdxVal + SubVecNumElems == NumElems) {
5664 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5665 DAG.getConstant(IdxVal, dl, MVT::i8));
5666 if (SubVecNumElems * 2 == NumElems) {
5667 // Special case, use legal zero extending insert_subvector. This allows
5668 // isel to optimize when bits are known zero.
5669 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5670 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5671 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5674 // Otherwise use explicit shifts to zero the bits.
5675 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5676 Undef, Vec, ZeroIdx);
5677 NumElems = WideOpVT.getVectorNumElements();
5678 SDValue ShiftBits = DAG.getConstant(NumElems - IdxVal, dl, MVT::i8);
5679 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5680 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5682 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5683 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5686 // Inserting into the middle is more complicated.
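// Strategy: shift the bits being replaced down to the lsbs, XOR them with the
// new bits, shift that difference back into position (clearing everything
// outside the inserted range to zero), and XOR it into the original vector.
// The old bits cancel out, leaving the new value in place and all other bits
// untouched.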
5688 NumElems = WideOpVT.getVectorNumElements();
5690 // Widen the vector if needed.
5691 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5692 // Move the current value of the bits being replaced down to the lsbs.
5693 Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5694 DAG.getConstant(IdxVal, dl, MVT::i8));
5695 // Xor with the new bit.
5696 Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Op, SubVec);
5697 // Shift to MSB, filling bottom bits with 0.
5698 unsigned ShiftLeft = NumElems - SubVecNumElems;
5699 Op = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Op,
5700 DAG.getConstant(ShiftLeft, dl, MVT::i8));
5701 // Shift to the final position, filling upper bits with 0.
5702 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5703 Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Op,
5704 DAG.getConstant(ShiftRight, dl, MVT::i8));
5705 // Xor with original vector leaving the new value.
5706 Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Vec, Op);
5707 // Reduce to original width if needed.
5708 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5711 static SDValue concatSubVectors(SDValue V1, SDValue V2, EVT VT,
5712 unsigned NumElems, SelectionDAG &DAG,
5713 const SDLoc &dl, unsigned VectorWidth) {
5714 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, VectorWidth);
5715 return insertSubVector(V, V2, NumElems / 2, DAG, dl, VectorWidth);
5718 /// Returns a vector of specified type with all bits set.
5719 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5720 /// Then bitcast to their original type, ensuring they get CSE'd.
5721 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5722 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5723 "Expected a 128/256/512-bit vector type");
5725 APInt Ones = APInt::getAllOnesValue(32);
5726 unsigned NumElts = VT.getSizeInBits() / 32;
5727 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5728 return DAG.getBitcast(VT, Vec);
5731 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
5732 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
5734 case ISD::ANY_EXTEND:
5735 case ISD::ANY_EXTEND_VECTOR_INREG:
5736 return ISD::ANY_EXTEND_VECTOR_INREG;
5737 case ISD::ZERO_EXTEND:
5738 case ISD::ZERO_EXTEND_VECTOR_INREG:
5739 return ISD::ZERO_EXTEND_VECTOR_INREG;
5740 case ISD::SIGN_EXTEND:
5741 case ISD::SIGN_EXTEND_VECTOR_INREG:
5742 return ISD::SIGN_EXTEND_VECTOR_INREG;
5744 llvm_unreachable("Unknown opcode");
5747 static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
5748 SDValue In, SelectionDAG &DAG) {
5749 EVT InVT = In.getValueType();
5750 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
5751 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
5752 ISD::ZERO_EXTEND == Opcode) &&
5753 "Unknown extension opcode");
5755 // For 256-bit vectors, we only need the lower (128-bit) input half.
5756 // For 512-bit vectors, we only need the lower input half or quarter.
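// e.g. extending v64i8 to v16i32 only reads the low 128 bits (v16i8) of the
// 512-bit input, so extract that subvector first.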
5757 if (InVT.getSizeInBits() > 128) {
5758 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
5759 "Expected VTs to be the same size!");
5760 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
5761 In = extractSubVector(In, 0, DAG, DL,
5762 std::max(128U, VT.getSizeInBits() / Scale));
5763 InVT = In.getValueType();
5766 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
5767 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
5769 return DAG.getNode(Opcode, DL, VT, In);
5772 /// Returns a vector_shuffle node for an unpackl operation.
5773 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5774 SDValue V1, SDValue V2) {
5775 SmallVector<int, 8> Mask;
5776 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
5777 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5780 /// Returns a vector_shuffle node for an unpackh operation.
5781 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5782 SDValue V1, SDValue V2) {
5783 SmallVector<int, 8> Mask;
5784 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
5785 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5788 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
5789 /// This produces a shuffle where the low element of V2 is swizzled into the
5790 /// zero/undef vector, landing at element Idx.
5791 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5792 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
5794 const X86Subtarget &Subtarget,
5795 SelectionDAG &DAG) {
5796 MVT VT = V2.getSimpleValueType();
5798 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5799 int NumElems = VT.getVectorNumElements();
5800 SmallVector<int, 16> MaskVec(NumElems);
5801 for (int i = 0; i != NumElems; ++i)
5802 // If this is the insertion idx, put the low elt of V2 here.
5803 MaskVec[i] = (i == Idx) ? NumElems : i;
5804 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
5807 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
5808 if (!Load || !ISD::isNormalLoad(Load))
5811 SDValue Ptr = Load->getBasePtr();
5812 if (Ptr->getOpcode() == X86ISD::Wrapper ||
5813 Ptr->getOpcode() == X86ISD::WrapperRIP)
5814 Ptr = Ptr->getOperand(0);
5816 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
5817 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
5820 return CNode->getConstVal();
5823 static const Constant *getTargetConstantFromNode(SDValue Op) {
5824 Op = peekThroughBitcasts(Op);
5825 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
5829 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
5830 assert(LD && "Unexpected null LoadSDNode");
5831 return getTargetConstantFromNode(LD);
5834 // Extract raw constant bits from constant pools.
5835 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
5837 SmallVectorImpl<APInt> &EltBits,
5838 bool AllowWholeUndefs = true,
5839 bool AllowPartialUndefs = true) {
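// On success, EltBits holds one EltSizeInBits-wide APInt per element and
// UndefElts has one bit per element marking wholly-undefined elements.
// AllowWholeUndefs/AllowPartialUndefs control whether elements whose source
// bits are entirely or only partially undef are accepted; partial undefs are
// treated as zero bits.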
5840 assert(EltBits.empty() && "Expected an empty EltBits vector");
5842 Op = peekThroughBitcasts(Op);
5844 EVT VT = Op.getValueType();
5845 unsigned SizeInBits = VT.getSizeInBits();
5846 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
5847 unsigned NumElts = SizeInBits / EltSizeInBits;
5849 // Bitcast a source array of element bits to the target size.
5850 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
5851 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
5852 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
5853 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
5854 "Constant bit sizes don't match");
5856 // Don't split if we don't allow undef bits.
5857 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
5858 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
5861 // If we're already the right size, don't bother bitcasting.
5862 if (NumSrcElts == NumElts) {
5863 UndefElts = UndefSrcElts;
5864 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
5868 // Extract all the undef/constant element data and pack into single bitsets.
5869 APInt UndefBits(SizeInBits, 0);
5870 APInt MaskBits(SizeInBits, 0);
5872 for (unsigned i = 0; i != NumSrcElts; ++i) {
5873 unsigned BitOffset = i * SrcEltSizeInBits;
5874 if (UndefSrcElts[i])
5875 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
5876 MaskBits.insertBits(SrcEltBits[i], BitOffset);
5879 // Split the undef/constant single bitset data into the target elements.
5880 UndefElts = APInt(NumElts, 0);
5881 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
5883 for (unsigned i = 0; i != NumElts; ++i) {
5884 unsigned BitOffset = i * EltSizeInBits;
5885 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
5887 // Only treat an element as UNDEF if all bits are UNDEF.
5888 if (UndefEltBits.isAllOnesValue()) {
5889 if (!AllowWholeUndefs)
5891 UndefElts.setBit(i);
5895 // If only some bits are UNDEF then treat them as zero (or bail if not supported).
5897 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
5900 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
5905 // Collect constant bits and insert into mask/undef bit masks.
5906 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
5907 unsigned UndefBitIndex) {
5910 if (isa<UndefValue>(Cst)) {
5911 Undefs.setBit(UndefBitIndex);
5914 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
5915 Mask = CInt->getValue();
5918 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
5919 Mask = CFP->getValueAPF().bitcastToAPInt();
5927 APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
5928 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
5929 return CastBitData(UndefSrcElts, SrcEltBits);
5932 // Extract scalar constant bits.
5933 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
5934 APInt UndefSrcElts = APInt::getNullValue(1);
5935 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
5936 return CastBitData(UndefSrcElts, SrcEltBits);
5938 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
5939 APInt UndefSrcElts = APInt::getNullValue(1);
5940 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
5941 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
5942 return CastBitData(UndefSrcElts, SrcEltBits);
5945 // Extract constant bits from build vector.
5946 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
5947 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
5948 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5950 APInt UndefSrcElts(NumSrcElts, 0);
5951 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
5952 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
5953 const SDValue &Src = Op.getOperand(i);
5954 if (Src.isUndef()) {
5955 UndefSrcElts.setBit(i);
5958 auto *Cst = cast<ConstantSDNode>(Src);
5959 SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
5961 return CastBitData(UndefSrcElts, SrcEltBits);
5963 if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
5964 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
5965 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5967 APInt UndefSrcElts(NumSrcElts, 0);
5968 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
5969 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
5970 const SDValue &Src = Op.getOperand(i);
5971 if (Src.isUndef()) {
5972 UndefSrcElts.setBit(i);
5975 auto *Cst = cast<ConstantFPSDNode>(Src);
5976 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
5977 SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
5979 return CastBitData(UndefSrcElts, SrcEltBits);
5982 // Extract constant bits from constant pool vector.
5983 if (auto *Cst = getTargetConstantFromNode(Op)) {
5984 Type *CstTy = Cst->getType();
5985 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
5986 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
5987 return false;
5989 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
5990 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5992 APInt UndefSrcElts(NumSrcElts, 0);
5993 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
5994 for (unsigned i = 0; i != NumSrcElts; ++i)
5995 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
5999 return CastBitData(UndefSrcElts, SrcEltBits);
6002 // Extract constant bits from a broadcasted constant pool scalar.
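// A single constant pool scalar is collected once and then splatted across
// all NumSrcElts source elements, propagating its undef-ness to every lane.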
6003 if (Op.getOpcode() == X86ISD::VBROADCAST &&
6004 EltSizeInBits <= VT.getScalarSizeInBits()) {
6005 if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
6006 unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
6007 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6009 APInt UndefSrcElts(NumSrcElts, 0);
6010 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6011 if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
6012 if (UndefSrcElts[0])
6013 UndefSrcElts.setBits(0, NumSrcElts);
6014 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6015 return CastBitData(UndefSrcElts, SrcEltBits);
6020 // Extract constant bits from a subvector broadcast.
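// Recurse on the broadcast source and tile its undef mask and element bits
// until they cover the full result width.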
6021 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6022 SmallVector<APInt, 16> SubEltBits;
6023 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6024 UndefElts, SubEltBits, AllowWholeUndefs,
6025 AllowPartialUndefs)) {
6026 UndefElts = APInt::getSplat(NumElts, UndefElts);
6027 while (EltBits.size() < NumElts)
6028 EltBits.append(SubEltBits.begin(), SubEltBits.end());
6033 // Extract a rematerialized scalar constant insertion.
6034 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6035 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6036 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6037 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6038 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6040 APInt UndefSrcElts(NumSrcElts, 0);
6041 SmallVector<APInt, 64> SrcEltBits;
6042 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6043 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6044 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6045 return CastBitData(UndefSrcElts, SrcEltBits);
6048 // Insert constant bits from base and subvector sources.
6049 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
6050 isa<ConstantSDNode>(Op.getOperand(2))) {
6051 // TODO - support insert_subvector through bitcasts.
6052 if (EltSizeInBits != VT.getScalarSizeInBits())
6056 SmallVector<APInt, 32> EltSubBits;
6057 if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6058 UndefSubElts, EltSubBits,
6059 AllowWholeUndefs, AllowPartialUndefs) &&
6060 getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6061 UndefElts, EltBits, AllowWholeUndefs,
6062 AllowPartialUndefs)) {
6063 unsigned BaseIdx = Op.getConstantOperandVal(2);
6064 UndefElts.insertBits(UndefSubElts, BaseIdx);
6065 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6066 EltBits[BaseIdx + i] = EltSubBits[i];
6071 // Extract constant bits from a subvector's source.
6072 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6073 isa<ConstantSDNode>(Op.getOperand(1))) {
6074 // TODO - support extract_subvector through bitcasts.
6075 if (EltSizeInBits != VT.getScalarSizeInBits())
6078 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6079 UndefElts, EltBits, AllowWholeUndefs,
6080 AllowPartialUndefs)) {
6081 EVT SrcVT = Op.getOperand(0).getValueType();
6082 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6083 unsigned NumSubElts = VT.getVectorNumElements();
6084 unsigned BaseIdx = Op.getConstantOperandVal(1);
6085 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6086 if ((BaseIdx + NumSubElts) != NumSrcElts)
6087 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6089 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6094 // Extract constant bits from shuffle node sources.
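// Resolve the constant bits of whichever shuffle operands the mask actually
// references, then merge them per mask entry; negative mask entries become
// undef result elements.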
6095 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6096 // TODO - support shuffle through bitcasts.
6097 if (EltSizeInBits != VT.getScalarSizeInBits())
6100 ArrayRef<int> Mask = SVN->getMask();
6101 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6102 llvm::any_of(Mask, [](int M) { return M < 0; }))
6105 APInt UndefElts0, UndefElts1;
6106 SmallVector<APInt, 32> EltBits0, EltBits1;
6107 if (isAnyInRange(Mask, 0, NumElts) &&
6108 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6109 UndefElts0, EltBits0, AllowWholeUndefs,
6110 AllowPartialUndefs))
6112 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6113 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6114 UndefElts1, EltBits1, AllowWholeUndefs,
6115 AllowPartialUndefs))
6118 UndefElts = APInt::getNullValue(NumElts);
6119 for (int i = 0; i != (int)NumElts; ++i) {
6120 int M = Mask[i];
6121 if (M < 0) {
6122 UndefElts.setBit(i);
6123 EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6124 } else if (M < (int)NumElts) {
6125 if (UndefElts0[M])
6126 UndefElts.setBit(i);
6127 EltBits.push_back(EltBits0[M]);
6128 } else {
6129 if (UndefElts1[M - NumElts])
6130 UndefElts.setBit(i);
6131 EltBits.push_back(EltBits1[M - NumElts]);
6140 static bool isConstantSplat(SDValue Op, APInt &SplatVal) {
6142 SmallVector<APInt, 16> EltBits;
6143 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6144 UndefElts, EltBits, true, false)) {
6145 int SplatIndex = -1;
6146 for (int i = 0, e = EltBits.size(); i != e; ++i) {
6149 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6155 if (0 <= SplatIndex) {
6156 SplatVal = EltBits[SplatIndex];
6164 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6165 unsigned MaskEltSizeInBits,
6166 SmallVectorImpl<uint64_t> &RawMask,
6168 // Extract the raw target constant bits.
6169 SmallVector<APInt, 64> EltBits;
6170 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6171 EltBits, /* AllowWholeUndefs */ true,
6172 /* AllowPartialUndefs */ false))
6175 // Insert the extracted elements into the mask.
6176 for (APInt Elt : EltBits)
6177 RawMask.push_back(Elt.getZExtValue());
6182 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6183 /// Note: This ignores saturation, so inputs must be checked first.
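/// e.g. for a binary v16i8 PACKSSWB/PACKUSWB this produces the mask
/// <0,2,4,...,14,16,18,...,30>, i.e. the low byte of every source word.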
6184 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6186 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6187 unsigned NumElts = VT.getVectorNumElements();
6188 unsigned NumLanes = VT.getSizeInBits() / 128;
6189 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6190 unsigned Offset = Unary ? 0 : NumElts;
6192 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6193 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6194 Mask.push_back(Elt + (Lane * NumEltsPerLane));
6195 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6196 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6200 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
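// Within each 128-bit lane the first NumInnerEltsPerLane result elements come
// from the LHS operand and the next NumInnerEltsPerLane from the RHS, so a
// demanded result element maps straight back to one source element.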
6201 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6202 APInt &DemandedLHS, APInt &DemandedRHS) {
6203 int NumLanes = VT.getSizeInBits() / 128;
6204 int NumElts = DemandedElts.getBitWidth();
6205 int NumInnerElts = NumElts / 2;
6206 int NumEltsPerLane = NumElts / NumLanes;
6207 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6209 DemandedLHS = APInt::getNullValue(NumInnerElts);
6210 DemandedRHS = APInt::getNullValue(NumInnerElts);
6212 // Map DemandedElts to the packed operands.
6213 for (int Lane = 0; Lane != NumLanes; ++Lane) {
6214 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6215 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6216 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6217 if (DemandedElts[OuterIdx])
6218 DemandedLHS.setBit(InnerIdx);
6219 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6220 DemandedRHS.setBit(InnerIdx);
6225 // Split the demanded elts of a HADD/HSUB node between its operands.
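// Within each 128-bit lane, result element Idx reads source elements 2*Idx and
// 2*Idx+1; the first half of the lane's results come from the LHS and the
// second half from the RHS.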
6226 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6227 APInt &DemandedLHS, APInt &DemandedRHS) {
6228 int NumLanes = VT.getSizeInBits() / 128;
6229 int NumElts = DemandedElts.getBitWidth();
6230 int NumEltsPerLane = NumElts / NumLanes;
6231 int HalfEltsPerLane = NumEltsPerLane / 2;
6233 DemandedLHS = APInt::getNullValue(NumElts);
6234 DemandedRHS = APInt::getNullValue(NumElts);
6236 // Map DemandedElts to the horizontal operands.
6237 for (int Idx = 0; Idx != NumElts; ++Idx) {
6238 if (!DemandedElts[Idx])
6240 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6241 int LocalIdx = Idx % NumEltsPerLane;
6242 if (LocalIdx < HalfEltsPerLane) {
6243 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6244 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6246 LocalIdx -= HalfEltsPerLane;
6247 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6248 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6253 /// Calculates the shuffle mask corresponding to the target-specific opcode.
6254 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6255 /// operands in \p Ops, and returns true.
6256 /// Sets \p IsUnary to true if only one source is used. Note that this will set
6257 /// IsUnary for shuffles which use a single input multiple times, and in those
6258 /// cases it will adjust the mask to only have indices within that single input.
6259 /// It is an error to call this with non-empty Mask/Ops vectors.
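/// When AllowSentinelZero is set the returned mask may contain SM_SentinelZero
/// entries; otherwise any zeroing shuffle causes the decode to fail.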
6260 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6261 SmallVectorImpl<SDValue> &Ops,
6262 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6263 unsigned NumElems = VT.getVectorNumElements();
6264 unsigned MaskEltSize = VT.getScalarSizeInBits();
6265 SmallVector<uint64_t, 32> RawMask;
6269 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6270 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6273 bool IsFakeUnary = false;
6274 switch (N->getOpcode()) {
6275 case X86ISD::BLENDI:
6276 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6277 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6278 ImmN = N->getOperand(N->getNumOperands() - 1);
6279 DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6280 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6283 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6284 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6285 ImmN = N->getOperand(N->getNumOperands() - 1);
6286 DecodeSHUFPMask(NumElems, MaskEltSize,
6287 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6288 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6290 case X86ISD::INSERTPS:
6291 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6292 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6293 ImmN = N->getOperand(N->getNumOperands() - 1);
6294 DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6295 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6297 case X86ISD::EXTRQI:
6298 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6299 if (isa<ConstantSDNode>(N->getOperand(1)) &&
6300 isa<ConstantSDNode>(N->getOperand(2))) {
6301 int BitLen = N->getConstantOperandVal(1);
6302 int BitIdx = N->getConstantOperandVal(2);
6303 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6307 case X86ISD::INSERTQI:
6308 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6309 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6310 if (isa<ConstantSDNode>(N->getOperand(2)) &&
6311 isa<ConstantSDNode>(N->getOperand(3))) {
6312 int BitLen = N->getConstantOperandVal(2);
6313 int BitIdx = N->getConstantOperandVal(3);
6314 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6315 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6318 case X86ISD::UNPCKH:
6319 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6320 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6321 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6322 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6324 case X86ISD::UNPCKL:
6325 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6326 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6327 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6328 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6330 case X86ISD::MOVHLPS:
6331 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6332 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6333 DecodeMOVHLPSMask(NumElems, Mask);
6334 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6336 case X86ISD::MOVLHPS:
6337 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6338 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6339 DecodeMOVLHPSMask(NumElems, Mask);
6340 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6342 case X86ISD::PALIGNR:
6343 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6344 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6345 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6346 ImmN = N->getOperand(N->getNumOperands() - 1);
6347 DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6349 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6350 Ops.push_back(N->getOperand(1));
6351 Ops.push_back(N->getOperand(0));
6353 case X86ISD::VSHLDQ:
6354 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6355 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6356 ImmN = N->getOperand(N->getNumOperands() - 1);
6357 DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6361 case X86ISD::VSRLDQ:
6362 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6363 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6364 ImmN = N->getOperand(N->getNumOperands() - 1);
6365 DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6369 case X86ISD::PSHUFD:
6370 case X86ISD::VPERMILPI:
6371 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6372 ImmN = N->getOperand(N->getNumOperands() - 1);
6373 DecodePSHUFMask(NumElems, MaskEltSize,
6374 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6377 case X86ISD::PSHUFHW:
6378 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6379 ImmN = N->getOperand(N->getNumOperands() - 1);
6380 DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6384 case X86ISD::PSHUFLW:
6385 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6386 ImmN = N->getOperand(N->getNumOperands() - 1);
6387 DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6391 case X86ISD::VZEXT_MOVL:
6392 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6393 DecodeZeroMoveLowMask(NumElems, Mask);
6396 case X86ISD::VBROADCAST: {
6397 SDValue N0 = N->getOperand(0);
6398 // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
6399 // add the pre-extracted value to the Ops vector.
6400 if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6401 N0.getOperand(0).getValueType() == VT &&
6402 N0.getConstantOperandVal(1) == 0)
6403 Ops.push_back(N0.getOperand(0));
6405 // We only decode broadcasts of same-sized vectors, unless the broadcast
6406 // came from an extract from the original width. If we found one, we
6407 // pushed it onto the Ops vector above.
6408 if (N0.getValueType() == VT || !Ops.empty()) {
6409 DecodeVectorBroadcast(NumElems, Mask);
6415 case X86ISD::VPERMILPV: {
6416 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6418 SDValue MaskNode = N->getOperand(1);
6419 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6421 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6426 case X86ISD::PSHUFB: {
6427 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6428 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6429 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6431 SDValue MaskNode = N->getOperand(1);
6432 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6433 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6438 case X86ISD::VPERMI:
6439 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6440 ImmN = N->getOperand(N->getNumOperands() - 1);
6441 DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6446 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6447 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6448 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6450 case X86ISD::VPERM2X128:
6451 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6452 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6453 ImmN = N->getOperand(N->getNumOperands() - 1);
6454 DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6456 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6458 case X86ISD::SHUF128:
6459 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6460 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6461 ImmN = N->getOperand(N->getNumOperands() - 1);
6462 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
6463 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6464 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6466 case X86ISD::MOVSLDUP:
6467 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6468 DecodeMOVSLDUPMask(NumElems, Mask);
6471 case X86ISD::MOVSHDUP:
6472 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6473 DecodeMOVSHDUPMask(NumElems, Mask);
6476 case X86ISD::MOVDDUP:
6477 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6478 DecodeMOVDDUPMask(NumElems, Mask);
6481 case X86ISD::VPERMIL2: {
6482 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6483 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6484 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6485 SDValue MaskNode = N->getOperand(2);
6486 SDValue CtrlNode = N->getOperand(3);
6487 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
6488 unsigned CtrlImm = CtrlOp->getZExtValue();
6489 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6491 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
6498 case X86ISD::VPPERM: {
6499 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6500 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6501 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6502 SDValue MaskNode = N->getOperand(2);
6503 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6504 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
6509 case X86ISD::VPERMV: {
6510 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6512 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
6513 Ops.push_back(N->getOperand(1));
6514 SDValue MaskNode = N->getOperand(0);
6515 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6517 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
6522 case X86ISD::VPERMV3: {
6523 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6524 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
6525 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
6526 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
6527 Ops.push_back(N->getOperand(0));
6528 Ops.push_back(N->getOperand(2));
6529 SDValue MaskNode = N->getOperand(1);
6530 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6532 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
6537 default: llvm_unreachable("unknown target shuffle node");
6540 // Empty mask indicates the decode failed.
6541 if (Mask.empty())
6542 return false;
6544 // Check if we're getting a shuffle mask with zero'd elements.
6545 if (!AllowSentinelZero)
6546 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
6547 return false;
6549 // If we have a fake unary shuffle, the shuffle mask is spread across two
6550 // inputs that are actually the same node. Re-map the mask to always point
6551 // into the first input.
6554 if (M >= (int)Mask.size())
6557 // If we didn't already add operands in the opcode-specific code, default to
6558 // adding 1 or 2 operands starting at 0.
6560 Ops.push_back(N->getOperand(0));
6561 if (!IsUnary || IsFakeUnary)
6562 Ops.push_back(N->getOperand(1));
6568 /// Check a target shuffle mask's inputs to see if we can set any values to
6569 /// SM_SentinelZero - this is for elements that are known to be zero
6570 /// (not just zeroable) from their inputs.
6571 /// Returns true if the target shuffle mask was decoded.
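/// Mask elements that read from an undef operand, from the zero lanes of a
/// SCALAR_TO_VECTOR, or from known-zero constant bits are rewritten to
/// SM_SentinelUndef / SM_SentinelZero as appropriate.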
6572 static bool setTargetShuffleZeroElements(SDValue N,
6573 SmallVectorImpl<int> &Mask,
6574 SmallVectorImpl<SDValue> &Ops) {
6576 if (!isTargetShuffle(N.getOpcode()))
6579 MVT VT = N.getSimpleValueType();
6580 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
6583 SDValue V1 = Ops[0];
6584 SDValue V2 = IsUnary ? V1 : Ops[1];
6586 V1 = peekThroughBitcasts(V1);
6587 V2 = peekThroughBitcasts(V2);
6589 assert((VT.getSizeInBits() % Mask.size()) == 0 &&
6590 "Illegal split of shuffle value type");
6591 unsigned EltSizeInBits = VT.getSizeInBits() / Mask.size();
6593 // Extract known constant input data.
6594 APInt UndefSrcElts[2];
6595 SmallVector<APInt, 32> SrcEltBits[2];
6596 bool IsSrcConstant[2] = {
6597 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
6598 SrcEltBits[0], true, false),
6599 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
6600 SrcEltBits[1], true, false)};
6602 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
6605 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
6609 // Determine shuffle input and normalize the mask.
6610 unsigned SrcIdx = M / Size;
6611 SDValue V = M < Size ? V1 : V2;
6612 M %= Size;
6614 // We are referencing an UNDEF input.
6615 if (V.isUndef()) {
6616 Mask[i] = SM_SentinelUndef;
6617 continue;
6620 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
6621 // TODO: We currently only set UNDEF for integer types - floats use the same
6622 // registers as vectors and many of the scalar folded loads rely on the
6623 // SCALAR_TO_VECTOR pattern.
6624 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
6625 (Size % V.getValueType().getVectorNumElements()) == 0) {
6626 int Scale = Size / V.getValueType().getVectorNumElements();
6627 int Idx = M / Scale;
6628 if (Idx != 0 && !VT.isFloatingPoint())
6629 Mask[i] = SM_SentinelUndef;
6630 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
6631 Mask[i] = SM_SentinelZero;
6635 // Attempt to extract from the source's constant bits.
6636 if (IsSrcConstant[SrcIdx]) {
6637 if (UndefSrcElts[SrcIdx][M])
6638 Mask[i] = SM_SentinelUndef;
6639 else if (SrcEltBits[SrcIdx][M] == 0)
6640 Mask[i] = SM_SentinelZero;
6644 assert(VT.getVectorNumElements() == Mask.size() &&
6645 "Different mask size from vector size!");
6649 // Forward declaration (for getFauxShuffleMask recursive check).
6650 static bool resolveTargetShuffleInputs(SDValue Op,
6651 SmallVectorImpl<SDValue> &Inputs,
6652 SmallVectorImpl<int> &Mask,
6655 // Attempt to decode ops that could be represented as a shuffle mask.
6656 // The decoded shuffle mask may contain a different number of elements than the
6657 // destination value type.
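// Unlike getTargetShuffleMask this also handles generic nodes (logic ops,
// subvector insertion, scalar insertion/extraction, whole-byte shifts,
// integer extensions, broadcasts) whose effect can be modelled as a shuffle.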
6658 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
6659 SmallVectorImpl<int> &Mask,
6660 SmallVectorImpl<SDValue> &Ops,
6661 SelectionDAG &DAG) {
6665 MVT VT = N.getSimpleValueType();
6666 unsigned NumElts = VT.getVectorNumElements();
6667 unsigned NumSizeInBits = VT.getSizeInBits();
6668 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6669 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
6671 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
6673 unsigned Opcode = N.getOpcode();
6675 case ISD::VECTOR_SHUFFLE: {
6676 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
6677 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
6678 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
6679 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
6680 Ops.push_back(N.getOperand(0));
6681 Ops.push_back(N.getOperand(1));
6687 case X86ISD::ANDNP: {
6688 // Attempt to decode as a per-byte mask.
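// Each byte of the constant operand must be 0x00 or 0xFF; bytes that the
// logic op forces to zero become SM_SentinelZero and the remaining bytes pass
// the non-constant operand straight through.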
6690 SmallVector<APInt, 32> EltBits;
6691 SDValue N0 = N.getOperand(0);
6692 SDValue N1 = N.getOperand(1);
6693 bool IsAndN = (X86ISD::ANDNP == Opcode);
6694 uint64_t ZeroMask = IsAndN ? 255 : 0;
6695 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
6697 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
6699 Mask.push_back(SM_SentinelUndef);
6702 uint64_t ByteBits = EltBits[i].getZExtValue();
6703 if (ByteBits != 0 && ByteBits != 255)
6705 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
6707 Ops.push_back(IsAndN ? N1 : N0);
6711 // Inspect each operand at the byte level. We can merge these into a
6712 // blend shuffle mask if for each byte at least one is masked out (zero).
6713 KnownBits Known0 = DAG.computeKnownBits(N.getOperand(0), DemandedElts);
6714 KnownBits Known1 = DAG.computeKnownBits(N.getOperand(1), DemandedElts);
6715 if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
6716 bool IsByteMask = true;
6717 unsigned NumSizeInBytes = NumSizeInBits / 8;
6718 unsigned NumBytesPerElt = NumBitsPerElt / 8;
6719 APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
6720 APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
6721 for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
6722 unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
6723 unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
6724 if (LHS == 255 && RHS == 0)
6725 SelectMask.setBit(i);
6726 else if (LHS == 255 && RHS == 255)
6728 else if (!(LHS == 0 && RHS == 255))
6732 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
6733 for (unsigned j = 0; j != NumBytesPerElt; ++j) {
6734 unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
6735 int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
6736 Mask.push_back(Idx);
6739 Ops.push_back(N.getOperand(0));
6740 Ops.push_back(N.getOperand(1));
6745 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
6746 // is a valid shuffle index.
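// Per element: if both sides are undef (or both zero) keep that sentinel,
// otherwise take the index from the non-zero side, offsetting indices from the
// second operand past the first operand's inputs.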
6747 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
6748 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
6749 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
6751 SmallVector<int, 64> SrcMask0, SrcMask1;
6752 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
6753 if (!resolveTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG) ||
6754 !resolveTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG))
6756 int MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
6757 SmallVector<int, 64> Mask0, Mask1;
6758 scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
6759 scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
6760 for (int i = 0; i != MaskSize; ++i) {
6761 if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
6762 Mask.push_back(SM_SentinelUndef);
6763 else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
6764 Mask.push_back(SM_SentinelZero);
6765 else if (Mask1[i] == SM_SentinelZero)
6766 Mask.push_back(Mask0[i]);
6767 else if (Mask0[i] == SM_SentinelZero)
6768 Mask.push_back(Mask1[i] + (MaskSize * SrcInputs0.size()));
6772 for (SDValue &Op : SrcInputs0)
6774 for (SDValue &Op : SrcInputs1)
6778 case ISD::INSERT_SUBVECTOR: {
6779 SDValue Src = N.getOperand(0);
6780 SDValue Sub = N.getOperand(1);
6781 EVT SubVT = Sub.getValueType();
6782 unsigned NumSubElts = SubVT.getVectorNumElements();
6783 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
6784 !N->isOnlyUserOf(Sub.getNode()))
6786 uint64_t InsertIdx = N.getConstantOperandVal(2);
6787 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
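// This is an identity mask over SRC0 with the inserted range rewritten to
// reference the extracted elements of SRC1.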
6788 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6789 Sub.getOperand(0).getValueType() == VT &&
6790 isa<ConstantSDNode>(Sub.getOperand(1))) {
6791 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
6792 for (int i = 0; i != (int)NumElts; ++i)
6794 for (int i = 0; i != (int)NumSubElts; ++i)
6795 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
6797 Ops.push_back(Sub.getOperand(0));
6800 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
6801 SmallVector<int, 64> SubMask;
6802 SmallVector<SDValue, 2> SubInputs;
6803 if (!resolveTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
6806 if (SubMask.size() != NumSubElts) {
6807 assert(((SubMask.size() % NumSubElts) == 0 ||
6808 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
6809 if ((NumSubElts % SubMask.size()) == 0) {
6810 int Scale = NumSubElts / SubMask.size();
6811 SmallVector<int,64> ScaledSubMask;
6812 scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
6813 SubMask = ScaledSubMask;
6815 int Scale = SubMask.size() / NumSubElts;
6816 NumSubElts = SubMask.size();
6822 for (SDValue &SubInput : SubInputs) {
6823 EVT SubSVT = SubInput.getValueType().getScalarType();
6824 EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
6825 NumSizeInBits / SubSVT.getSizeInBits());
6826 Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
6827 DAG.getUNDEF(AltVT), SubInput,
6828 DAG.getIntPtrConstant(0, SDLoc(N))));
6830 for (int i = 0; i != (int)NumElts; ++i)
6832 for (int i = 0; i != (int)NumSubElts; ++i) {
6835 int InputIdx = M / NumSubElts;
6836 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
6838 Mask[i + InsertIdx] = M;
6842 case ISD::SCALAR_TO_VECTOR: {
6843 // Match against a scalar_to_vector of an extract from a vector;
6844 // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
6845 SDValue N0 = N.getOperand(0);
6848 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6849 N0.getOperand(0).getValueType() == VT) ||
6850 (N0.getOpcode() == X86ISD::PEXTRW &&
6851 N0.getOperand(0).getValueType() == MVT::v8i16) ||
6852 (N0.getOpcode() == X86ISD::PEXTRB &&
6853 N0.getOperand(0).getValueType() == MVT::v16i8)) {
6857 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
6860 SDValue SrcVec = SrcExtract.getOperand(0);
6861 EVT SrcVT = SrcVec.getValueType();
6862 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6863 unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
6865 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
6866 if (NumSrcElts <= SrcIdx)
6869 Ops.push_back(SrcVec);
6870 Mask.push_back(SrcIdx);
6871 Mask.append(NumZeros, SM_SentinelZero);
6872 Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
6875 case X86ISD::PINSRB:
6876 case X86ISD::PINSRW: {
6877 SDValue InVec = N.getOperand(0);
6878 SDValue InScl = N.getOperand(1);
6879 SDValue InIndex = N.getOperand(2);
6880 if (!isa<ConstantSDNode>(InIndex) ||
6881 cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
6883 uint64_t InIdx = N.getConstantOperandVal(2);
6885 // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
6886 if (X86::isZeroNode(InScl)) {
6887 Ops.push_back(InVec);
6888 for (unsigned i = 0; i != NumElts; ++i)
6889 Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
6893 // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
6894 // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
6896 (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
6897 if (InScl.getOpcode() != ExOp)
6900 SDValue ExVec = InScl.getOperand(0);
6901 SDValue ExIndex = InScl.getOperand(1);
6902 if (!isa<ConstantSDNode>(ExIndex) ||
6903 cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
6905 uint64_t ExIdx = InScl.getConstantOperandVal(1);
6907 Ops.push_back(InVec);
6908 Ops.push_back(ExVec);
6909 for (unsigned i = 0; i != NumElts; ++i)
6910 Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
6913 case X86ISD::PACKSS:
6914 case X86ISD::PACKUS: {
6915 SDValue N0 = N.getOperand(0);
6916 SDValue N1 = N.getOperand(1);
6917 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
6918 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
6919 "Unexpected input value type");
6921 APInt EltsLHS, EltsRHS;
6922 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
6924 // If we know input saturation won't happen we can treat this
6925 // as a truncation shuffle.
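// PACKSS requires every demanded source element to already be in range as a
// signed value (enough known sign bits); PACKUS requires the discarded upper
// half of every demanded source element to be known zero.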
6926 if (Opcode == X86ISD::PACKSS) {
6927 if ((!N0.isUndef() &&
6928 DAG.ComputeNumSignBits(N0, EltsLHS) <= NumBitsPerElt) ||
6930 DAG.ComputeNumSignBits(N1, EltsRHS) <= NumBitsPerElt))
6933 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
6934 if ((!N0.isUndef() && !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS)) ||
6935 (!N1.isUndef() && !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS)))
6939 bool IsUnary = (N0 == N1);
6945 createPackShuffleMask(VT, Mask, IsUnary);
6949 case X86ISD::VSRLI: {
6950 uint64_t ShiftVal = N.getConstantOperandVal(1);
6951 // Out of range bit shifts are guaranteed to be zero.
6952 if (NumBitsPerElt <= ShiftVal) {
6953 Mask.append(NumElts, SM_SentinelZero);
6957 // We can only decode 'whole byte' bit shifts as shuffles.
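// A whole-byte VSHLI/VSRLI is a byte shuffle within each element: shifted-in
// byte positions become SM_SentinelZero and the surviving bytes move by
// ByteShift.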
6958 if ((ShiftVal % 8) != 0)
6961 uint64_t ByteShift = ShiftVal / 8;
6962 unsigned NumBytes = NumSizeInBits / 8;
6963 unsigned NumBytesPerElt = NumBitsPerElt / 8;
6964 Ops.push_back(N.getOperand(0));
6966 // Clear mask to all zeros and insert the shifted byte indices.
6967 Mask.append(NumBytes, SM_SentinelZero);
6969 if (X86ISD::VSHLI == Opcode) {
6970 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
6971 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6972 Mask[i + j] = i + j - ByteShift;
6974 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
6975 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6976 Mask[i + j - ByteShift] = i + j;
6980 case X86ISD::VBROADCAST: {
6981 SDValue Src = N.getOperand(0);
6982 MVT SrcVT = Src.getSimpleValueType();
6983 if (!SrcVT.isVector())
6986 if (NumSizeInBits != SrcVT.getSizeInBits()) {
6987 assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
6988 "Illegal broadcast type");
6989 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
6990 NumSizeInBits / SrcVT.getScalarSizeInBits());
6991 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
6992 DAG.getUNDEF(SrcVT), Src,
6993 DAG.getIntPtrConstant(0, SDLoc(N)));
6997 Mask.append(NumElts, 0);
7000 case ISD::ZERO_EXTEND:
7001 case ISD::ANY_EXTEND:
7002 case ISD::ZERO_EXTEND_VECTOR_INREG:
7003 case ISD::ANY_EXTEND_VECTOR_INREG: {
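// An integer extension places each narrow source element in the low part of
// the wider result element and fills the rest with zero (SM_SentinelZero), or
// with undef for any-extend.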
7004 SDValue Src = N.getOperand(0);
7005 EVT SrcVT = Src.getValueType();
7007 // Extended source must be a simple vector.
7008 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7009 (SrcVT.getScalarSizeInBits() % 8) != 0)
7012 unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
7014 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7015 DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
7018 if (NumSizeInBits != SrcVT.getSizeInBits()) {
7019 assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7020 "Illegal zero-extension type");
7021 SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
7022 NumSizeInBits / NumSrcBitsPerElt);
7023 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7024 DAG.getUNDEF(SrcVT), Src,
7025 DAG.getIntPtrConstant(0, SDLoc(N)));
7036 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
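/// Undef and never-referenced inputs are dropped, duplicate inputs are merged,
/// and the mask indices are renumbered to match the compacted input list.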
7037 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7038 SmallVectorImpl<int> &Mask) {
7039 int MaskWidth = Mask.size();
7040 SmallVector<SDValue, 16> UsedInputs;
7041 for (int i = 0, e = Inputs.size(); i < e; ++i) {
7042 int lo = UsedInputs.size() * MaskWidth;
7043 int hi = lo + MaskWidth;
7045 // Strip UNDEF input usage.
7046 if (Inputs[i].isUndef())
7048 if ((lo <= M) && (M < hi))
7049 M = SM_SentinelUndef;
7051 // Check for unused inputs.
7052 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7059 // Check for repeated inputs.
7060 bool IsRepeat = false;
7061 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7062 if (UsedInputs[j] != Inputs[i])
7066 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7073 UsedInputs.push_back(Inputs[i]);
7075 Inputs = UsedInputs;
7078 /// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
7079 /// and set the SM_SentinelUndef and SM_SentinelZero values. Then checks the
7080 /// remaining input indices in case we now have a unary shuffle and adjusts the
7081 /// inputs accordingly.
7082 /// Returns true if the target shuffle mask was decoded.
7083 static bool resolveTargetShuffleInputs(SDValue Op,
7084 SmallVectorImpl<SDValue> &Inputs,
7085 SmallVectorImpl<int> &Mask,
7086 SelectionDAG &DAG) {
7087 unsigned NumElts = Op.getValueType().getVectorNumElements();
7088 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7089 if (!setTargetShuffleZeroElements(Op, Mask, Inputs))
7090 if (!getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG))
7091 return false;
7093 resolveTargetShuffleInputsAndMask(Inputs, Mask);
7097 /// Returns the scalar element that will make up the ith
7098 /// element of the result of the vector shuffle.
7099 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
7102 return SDValue(); // Limit search depth.
7104 SDValue V = SDValue(N, 0);
7105 EVT VT = V.getValueType();
7106 unsigned Opcode = V.getOpcode();
7108 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7109 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
7110 int Elt = SV->getMaskElt(Index);
7113 return DAG.getUNDEF(VT.getVectorElementType());
7115 unsigned NumElems = VT.getVectorNumElements();
7116 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
7117 : SV->getOperand(1);
7118 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
7121 // Recurse into target specific vector shuffles to find scalars.
7122 if (isTargetShuffle(Opcode)) {
7123 MVT ShufVT = V.getSimpleValueType();
7124 MVT ShufSVT = ShufVT.getVectorElementType();
7125 int NumElems = (int)ShufVT.getVectorNumElements();
7126 SmallVector<int, 16> ShuffleMask;
7127 SmallVector<SDValue, 16> ShuffleOps;
7130 if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
7133 int Elt = ShuffleMask[Index];
7134 if (Elt == SM_SentinelZero)
7135 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
7136 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
7137 if (Elt == SM_SentinelUndef)
7138 return DAG.getUNDEF(ShufSVT);
7140 assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
7141 SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7142 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
7146 // Recurse into insert_subvector base/sub vector to find scalars.
7147 if (Opcode == ISD::INSERT_SUBVECTOR &&
7148 isa<ConstantSDNode>(N->getOperand(2))) {
7149 SDValue Vec = N->getOperand(0);
7150 SDValue Sub = N->getOperand(1);
7151 EVT SubVT = Sub.getValueType();
7152 unsigned NumSubElts = SubVT.getVectorNumElements();
7153 uint64_t SubIdx = N->getConstantOperandVal(2);
7155 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7156 return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
7157 return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
7160 // Recurse into extract_subvector src vector to find scalars.
7161 if (Opcode == ISD::EXTRACT_SUBVECTOR &&
7162 isa<ConstantSDNode>(N->getOperand(1))) {
7163 SDValue Src = N->getOperand(0);
7164 uint64_t SrcIdx = N->getConstantOperandVal(1);
7165 return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
7168 // Actual nodes that may contain scalar elements
7169 if (Opcode == ISD::BITCAST) {
7170 V = V.getOperand(0);
7171 EVT SrcVT = V.getValueType();
7172 unsigned NumElems = VT.getVectorNumElements();
7174 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
7178 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
7179 return (Index == 0) ? V.getOperand(0)
7180 : DAG.getUNDEF(VT.getVectorElementType());
7182 if (V.getOpcode() == ISD::BUILD_VECTOR)
7183 return V.getOperand(Index);
7188 // Use PINSRB/PINSRW/PINSRD to create a build vector.
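// Each non-zero element is inserted individually; zero elements come for free
// by starting from a zero vector rather than being inserted.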
7189 static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7190 unsigned NumNonZero, unsigned NumZero,
7192 const X86Subtarget &Subtarget) {
7193 MVT VT = Op.getSimpleValueType();
7194 unsigned NumElts = VT.getVectorNumElements();
7195 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7196 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7197 "Illegal vector insertion");
7203 for (unsigned i = 0; i < NumElts; ++i) {
7204 bool IsNonZero = (NonZeros & (1 << i)) != 0;
7208 // If the build vector contains zeros or our first insertion is not the
7209 // first index, then insert into a zero vector to break any register
7210 // dependency; otherwise use SCALAR_TO_VECTOR.
7213 if (NumZero || 0 != i)
7214 V = getZeroVector(VT, Subtarget, DAG, dl);
7216 assert(0 == i && "Expected insertion into zero-index");
7217 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7218 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7219 V = DAG.getBitcast(VT, V);
7223 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7224 DAG.getIntPtrConstant(i, dl));
7230 /// Custom lower build_vector of v16i8.
7231 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7232 unsigned NumNonZero, unsigned NumZero,
7234 const X86Subtarget &Subtarget) {
7235 if (NumNonZero > 8 && !Subtarget.hasSSE41())
7238 // SSE4.1 - use PINSRB to insert each byte directly.
7239 if (Subtarget.hasSSE41())
7240 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7246 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
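// Bytes i and i+1 are combined into one 16-bit value (the odd byte shifted
// left by 8) and inserted into a v8i16 at index i/2.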
7247 for (unsigned i = 0; i < 16; i += 2) {
7248 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7249 bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7250 if (!ThisIsNonZero && !NextIsNonZero)
7253 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7255 if (ThisIsNonZero) {
7256 if (NumZero || NextIsNonZero)
7257 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7259 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7262 if (NextIsNonZero) {
7263 SDValue NextElt = Op.getOperand(i + 1);
7264 if (i == 0 && NumZero)
7265 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7267 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7268 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7269 DAG.getConstant(8, dl, MVT::i8));
7271 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7276 // If our first insertion is not the first index, then insert into a zero
7277 // vector to break any register dependency; otherwise use SCALAR_TO_VECTOR.
7280 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
7282 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
7283 V = DAG.getBitcast(MVT::v8i16, V);
7287 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
7288 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
7289 DAG.getIntPtrConstant(i / 2, dl));
7292 return DAG.getBitcast(MVT::v16i8, V);
7295 /// Custom lower build_vector of v8i16.
7296 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
7297 unsigned NumNonZero, unsigned NumZero,
7299 const X86Subtarget &Subtarget) {
7300 if (NumNonZero > 4 && !Subtarget.hasSSE41())
7303 // Use PINSRW to insert each element directly.
7304 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7308 /// Custom lower build_vector of v4i32 or v4f32.
7309 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
7310 const X86Subtarget &Subtarget) {
7311 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
7312 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
7313 // Because we're creating a less complicated build vector here, we may enable
7314 // further folding of the MOVDDUP via shuffle transforms.
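// i.e. build_vector(a,b,a,b) becomes
// bitcast(movddup(bitcast(build_vector(a,b,undef,undef)))).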
7315 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
7316 Op.getOperand(0) == Op.getOperand(2) &&
7317 Op.getOperand(1) == Op.getOperand(3) &&
7318 Op.getOperand(0) != Op.getOperand(1)) {
7320 MVT VT = Op.getSimpleValueType();
7321 MVT EltVT = VT.getVectorElementType();
7322 // Create a new build vector with the first 2 elements followed by undef
7323 // padding, bitcast to v2f64, duplicate, and bitcast back.
7324 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
7325 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
7326 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
7327 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
7328 return DAG.getBitcast(VT, Dup);
7331 // Find all zeroable elements.
7332 std::bitset<4> Zeroable, Undefs;
7333 for (int i = 0; i < 4; ++i) {
7334 SDValue Elt = Op.getOperand(i);
7335 Undefs[i] = Elt.isUndef();
7336 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
7338 assert(Zeroable.size() - Zeroable.count() > 1 &&
7339 "We expect at least two non-zero elements!");
7341 // We only know how to deal with build_vector nodes where elements are either
7342 // zeroable or extract_vector_elt with constant index.
7343 SDValue FirstNonZero;
7344 unsigned FirstNonZeroIdx;
7345 for (unsigned i = 0; i < 4; ++i) {
7348 SDValue Elt = Op.getOperand(i);
7349 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7350 !isa<ConstantSDNode>(Elt.getOperand(1)))
7352 // Make sure that this node is extracting from a 128-bit vector.
7353 MVT VT = Elt.getOperand(0).getSimpleValueType();
7354 if (!VT.is128BitVector())
7356 if (!FirstNonZero.getNode()) {
7358 FirstNonZeroIdx = i;
7362 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
7363 SDValue V1 = FirstNonZero.getOperand(0);
7364 MVT VT = V1.getSimpleValueType();
7366 // See if this build_vector can be lowered as a blend with zero.
7368 unsigned EltMaskIdx, EltIdx;
7370 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
7371 if (Zeroable[EltIdx]) {
7372 // The zero vector will be on the right hand side.
7373 Mask[EltIdx] = EltIdx+4;
7377 Elt = Op->getOperand(EltIdx);
7378 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
7379 EltMaskIdx = Elt.getConstantOperandVal(1);
7380 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
7382 Mask[EltIdx] = EltIdx;
7386 // Let the shuffle legalizer deal with blend operations.
7387 SDValue VZeroOrUndef = (Zeroable == Undefs)
7389 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
7390 if (V1.getSimpleValueType() != VT)
7391 V1 = DAG.getBitcast(VT, V1);
7392 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
7395 // See if we can lower this build_vector to an INSERTPS.
7396 if (!Subtarget.hasSSE41())
7399 SDValue V2 = Elt.getOperand(0);
7400 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7403 bool CanFold = true;
7404 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7408 SDValue Current = Op->getOperand(i);
7409 SDValue SrcVector = Current->getOperand(0);
7412 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7418 assert(V1.getNode() && "Expected at least two non-zero elements!");
7419 if (V1.getSimpleValueType() != MVT::v4f32)
7420 V1 = DAG.getBitcast(MVT::v4f32, V1);
7421 if (V2.getSimpleValueType() != MVT::v4f32)
7422 V2 = DAG.getBitcast(MVT::v4f32, V2);
7424 // Ok, we can emit an INSERTPS instruction.
7425 unsigned ZMask = Zeroable.to_ulong();
7427 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7428 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7430 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7431 DAG.getIntPtrConstant(InsertPSMask, DL));
7432 return DAG.getBitcast(VT, Result);
7435 /// Return a vector logical shift node.
7436 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7437 SelectionDAG &DAG, const TargetLowering &TLI,
7439 assert(VT.is128BitVector() && "Unknown type for VShift");
7440 MVT ShVT = MVT::v16i8;
7441 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7442 SrcOp = DAG.getBitcast(ShVT, SrcOp);
7443 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7444 SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, MVT::i8);
7445 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7448 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7449 SelectionDAG &DAG) {
7451 // Check if the scalar load can be widened into a vector load, and if
7452 // the address is "base + cst", see if the cst can be "absorbed" into
7453 // the shuffle mask.
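// e.g. a 4-byte scalar load from "base + 8" can become a 16-byte vector load
// from "base" followed by a splat of element 2 of that wide load.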
7454 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7455 SDValue Ptr = LD->getBasePtr();
7456 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
7458 EVT PVT = LD->getValueType(0);
7459 if (PVT != MVT::i32 && PVT != MVT::f32)
7464 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7465 FI = FINode->getIndex();
7467 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7468 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7469 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7470 Offset = Ptr.getConstantOperandVal(1);
7471 Ptr = Ptr.getOperand(0);
7476 // FIXME: 256-bit vector instructions don't require strict alignment;
7477 // improve this code to support it better.
7478 unsigned RequiredAlign = VT.getSizeInBits()/8;
7479 SDValue Chain = LD->getChain();
7480 // Make sure the stack object alignment is at least 16 or 32.
7481 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7482 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7483 if (MFI.isFixedObjectIndex(FI)) {
7484 // Can't change the alignment. FIXME: It's possible to compute
7485 // the exact stack offset and reference FI + adjusted offset instead.
7486 // If someone *really* cares about this, that's the way to implement it.
7489 MFI.setObjectAlignment(FI, RequiredAlign);
7493 // (Offset % 16 or 32) must be a multiple of 4. The address is then
7494 // Ptr + (Offset & ~15).
7497 if ((Offset % RequiredAlign) & 3)
7499 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7502 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7503 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7506 int EltNo = (Offset - StartOffset) >> 2;
7507 unsigned NumElems = VT.getVectorNumElements();
7509 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7510 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
7511 LD->getPointerInfo().getWithOffset(StartOffset));
7513 SmallVector<int, 8> Mask(NumElems, EltNo);
7515 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
7521 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
7522 /// elements can be replaced by a single large load which has the same value as
7523 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
7525 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
7526 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
7527 const SDLoc &DL, SelectionDAG &DAG,
7528 const X86Subtarget &Subtarget,
7529 bool isAfterLegalize) {
7530 unsigned NumElems = Elts.size();
7532 int LastLoadedElt = -1;
7533 APInt LoadMask = APInt::getNullValue(NumElems);
7534 APInt ZeroMask = APInt::getNullValue(NumElems);
7535 APInt UndefMask = APInt::getNullValue(NumElems);
7537 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
7539 // For each element in the initializer, see if we've found a load, zero or an undef value.
7541 for (unsigned i = 0; i < NumElems; ++i) {
7542 SDValue Elt = peekThroughBitcasts(Elts[i]);
7545 if (Elt.isUndef()) {
7546 UndefMask.setBit(i);
7549 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
7554 // Each loaded element must be the correct fractional portion of the
7555 // requested vector load.
7556 if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
7559 if (!ISD::isNON_EXTLoad(Elt.getNode()))
7562 Loads[i] = cast<LoadSDNode>(Elt);
7566 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
7567 LoadMask.countPopulation()) == NumElems &&
7568 "Incomplete element masks");
7570 // Handle Special Cases - all undef or undef/zero.
7571 if (UndefMask.countPopulation() == NumElems)
7572 return DAG.getUNDEF(VT);
7574 // FIXME: Should we return this as a BUILD_VECTOR instead?
7575 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
7576 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
7577 : DAG.getConstantFP(0.0, DL, VT);
7579 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7580 int FirstLoadedElt = LoadMask.countTrailingZeros();
7581 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
7582 EVT EltBaseVT = EltBase.getValueType();
7583 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
7584 "Register/Memory size mismatch");
7585 LoadSDNode *LDBase = Loads[FirstLoadedElt];
7586 assert(LDBase && "Did not find base load for merging consecutive loads");
7587 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
7588 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
7589 int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
7590 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
7592 // Consecutive loads can contain UNDEFS but not ZERO elements.
7593 // Consecutive loads with UNDEF and ZERO elements require an
7594 // additional shuffle stage to clear the ZERO elements.
7595 bool IsConsecutiveLoad = true;
7596 bool IsConsecutiveLoadWithZeros = true;
7597 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
7599 if (!DAG.areNonVolatileConsecutiveLoads(Loads[i], LDBase, BaseSizeInBytes,
7600 i - FirstLoadedElt)) {
7601 IsConsecutiveLoad = false;
7602 IsConsecutiveLoadWithZeros = false;
7605 } else if (ZeroMask[i]) {
7606 IsConsecutiveLoad = false;
7610 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
7611 auto MMOFlags = LDBase->getMemOperand()->getFlags();
7612 assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
7613 "Cannot merge volatile loads.");
7615 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
7616 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
7617 for (auto *LD : Loads)
7619 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
7623 // Check if the base load is entirely dereferenceable.
7624 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
7625 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
7627 // LOAD - all consecutive load/undefs (must start/end with a load or be
7628 // entirely dereferenceable). If we have found an entire vector of loads and
7629 // undefs, then return a large load of the entire vector width starting at the
7630 // base pointer. If the vector contains zeros, clear them with an additional shuffle (see below).
7632 if (FirstLoadedElt == 0 &&
7633 (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
7634 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
7635 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
7638 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
7639 // will lower to regular temporal loads and use the cache.
7640 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
7641 VT.is256BitVector() && !Subtarget.hasInt256())
7645 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
7648 return CreateLoad(VT, LDBase);
7650 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
7651 // vector and a zero vector to clear out the zero elements.
7652 if (!isAfterLegalize && VT.isVector()) {
7653 unsigned NumMaskElts = VT.getVectorNumElements();
7654 if ((NumMaskElts % NumElems) == 0) {
7655 unsigned Scale = NumMaskElts / NumElems;
7656 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
7657 for (unsigned i = 0; i < NumElems; ++i) {
7660 int Offset = ZeroMask[i] ? NumMaskElts : 0;
7661 for (unsigned j = 0; j != Scale; ++j)
7662 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
7664 SDValue V = CreateLoad(VT, LDBase);
7665 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
7666 : DAG.getConstantFP(0.0, DL, VT);
7667 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
7672 // If the upper half of a ymm/zmm load is undef then just load the lower half.
7673 if (VT.is256BitVector() || VT.is512BitVector()) {
7674 unsigned HalfNumElems = NumElems / 2;
7675 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
7677 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
7679 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
7680 DAG, Subtarget, isAfterLegalize);
7682 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
7683 HalfLD, DAG.getIntPtrConstant(0, DL));
7687 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
7688 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
7689 (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
7690 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
7691 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
7692 : MVT::getIntegerVT(LoadSizeInBits);
7693 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
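// For example, a v4f32 build_vector whose first two elements form one
// consecutive 64-bit load and whose remaining elements are zero/undef uses
// VecSVT == f64 and VecVT == v2f64: a single 64-bit load zero-extended to
// the full vector width and then bitcast back to v4f32.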
7694 if (TLI.isTypeLegal(VecVT)) {
7695 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
7696 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
7698 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
7699 LDBase->getPointerInfo(),
7700 LDBase->getAlignment(),
7701 MachineMemOperand::MOLoad);
7702 for (auto *LD : Loads)
7704 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
7705 return DAG.getBitcast(VT, ResNode);
7709 // BROADCAST - match the smallest possible repetition pattern, load that
7710 // scalar/subvector element and then broadcast to the entire vector.
7711 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
7712 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
7713 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
7714 unsigned RepeatSize = SubElems * BaseSizeInBits;
7715 unsigned ScalarSize = std::min(RepeatSize, 64u);
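// Without AVX2 only the 32/64-bit floating-point broadcasts
// (vbroadcastss/vbroadcastsd) are available, so don't bother with smaller
// repeat sizes.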
7716 if (!Subtarget.hasAVX2() && ScalarSize < 32)
7720 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
7721 for (unsigned i = 0; i != NumElems && Match; ++i) {
7724 SDValue Elt = peekThroughBitcasts(Elts[i]);
7725 if (RepeatedLoads[i % SubElems].isUndef())
7726 RepeatedLoads[i % SubElems] = Elt;
7728 Match &= (RepeatedLoads[i % SubElems] == Elt);
7731 // We must have loads at both ends of the repetition.
7732 Match &= !RepeatedLoads.front().isUndef();
7733 Match &= !RepeatedLoads.back().isUndef();
7738 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
7739 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
7740 : EVT::getFloatingPointVT(ScalarSize);
7741 if (RepeatSize > ScalarSize)
7742 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
7743 RepeatSize / ScalarSize);
7745 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
7746 VT.getSizeInBits() / ScalarSize);
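// For example, a v16i8 build_vector that repeats four consecutive byte
// loads gives RepeatVT == i32 and BroadcastVT == v4i32: a single i32 load
// is broadcast to v4i32 and then bitcast back to v16i8.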
7747 if (TLI.isTypeLegal(BroadcastVT)) {
7748 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
7749 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
7750 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
7751 : X86ISD::VBROADCAST;
7752 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
7753 return DAG.getBitcast(VT, Broadcast);
7762 // Combine a vector op (shuffle etc.) that is equal to build_vector load1,
7763 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
7764 // are consecutive, non-overlapping, and in the right order.
7765 static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
7767 const X86Subtarget &Subtarget,
7768 bool isAfterLegalize) {
7769 SmallVector<SDValue, 64> Elts;
7770 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7771 if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
7772 Elts.push_back(Elt);
7777 assert(Elts.size() == VT.getVectorNumElements());
7778 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
7782 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
7783 unsigned SplatBitSize, LLVMContext &C) {
7784 unsigned ScalarSize = VT.getScalarSizeInBits();
7785 unsigned NumElm = SplatBitSize / ScalarSize;
7787 SmallVector<Constant *, 32> ConstantVec;
7788 for (unsigned i = 0; i < NumElm; i++) {
7789 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
7791 if (VT.isFloatingPoint()) {
7792 if (ScalarSize == 32) {
7793 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7795 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7796 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7799 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
7800 ConstantVec.push_back(Const);
7802 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
7805 static bool isFoldableUseOfShuffle(SDNode *N) {
7806 for (auto *U : N->uses()) {
7807 unsigned Opc = U->getOpcode();
7808 // VPERMV/VPERMV3 shuffles can never fold their index operands.
7809 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
7811 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
7813 if (isTargetShuffle(Opc))
7815 if (Opc == ISD::BITCAST) // Ignore bitcasts
7816 return isFoldableUseOfShuffle(U);
7823 // Check if the current node of the build_vector is a zero-extended vector.
7824 // If so, return the value that is being extended.
7825 // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
7826 // NumElt - returns the number of zero-extended identical values.
7827 // EltType - returns the type of the value including the zero extension.
7828 static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
7829 unsigned &NumElt, MVT &EltType) {
7830 SDValue ExtValue = Op->getOperand(0);
7831 unsigned NumElts = Op->getNumOperands();
7832 unsigned Delta = NumElts;
7834 for (unsigned i = 1; i < NumElts; i++) {
7835 if (Op->getOperand(i) == ExtValue) {
7839 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
7842 if (!isPowerOf2_32(Delta) || Delta == 1)
7845 for (unsigned i = Delta; i < NumElts; i++) {
7846 if (i % Delta == 0) {
7847 if (Op->getOperand(i) != ExtValue)
7849 } else if (!(isNullConstant(Op->getOperand(i)) ||
7850 Op->getOperand(i).isUndef()))
7853 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
7854 unsigned ExtVTSize = EltSize * Delta;
7855 EltType = MVT::getIntegerVT(ExtVTSize);
7856 NumElt = NumElts / Delta;
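// The build_vector is thus a splat of NumElt copies of ExtValue
// zero-extended to EltType; e.g. (a,0,0,0,a,0,0,0) in operand order with
// i8 elements gives Delta == 4, EltType == i32 and NumElt == 2.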
7860 /// Attempt to use the vbroadcast instruction to generate a splat value
7861 /// from a splat BUILD_VECTOR which uses:
7862 /// a. A single scalar load, or a constant.
7863 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
7865 /// The VBROADCAST node is returned when a pattern is found,
7866 /// or SDValue() otherwise.
7867 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
7868 const X86Subtarget &Subtarget,
7869 SelectionDAG &DAG) {
7870 // VBROADCAST requires AVX.
7871 // TODO: Splats could be generated for non-AVX CPUs using SSE
7872 // instructions, but there's less potential gain for only 128-bit vectors.
7873 if (!Subtarget.hasAVX())
7876 MVT VT = BVOp->getSimpleValueType(0);
7879 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7880 "Unsupported vector type for broadcast.");
7882 BitVector UndefElements;
7883 SDValue Ld = BVOp->getSplatValue(&UndefElements);
7885 // Attempt to use VBROADCASTM
7886 // From this pattern:
7887 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
7888 // b. t1 = (build_vector t0 t0)
7890 // Create (VBROADCASTM v2i1 X)
7891 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
7892 MVT EltType = VT.getScalarType();
7893 unsigned NumElts = VT.getVectorNumElements();
7895 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
7896 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
7897 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
7898 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
7900 BOperand = ZeroExtended.getOperand(0);
7902 BOperand = Ld.getOperand(0).getOperand(0);
7903 MVT MaskVT = BOperand.getSimpleValueType();
7904 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
7905 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
7907 DAG.getNode(X86ISD::VBROADCASTM, dl,
7908 MVT::getVectorVT(EltType, NumElts), BOperand);
7909 return DAG.getBitcast(VT, Brdcst);
7914 unsigned NumElts = VT.getVectorNumElements();
7915 unsigned NumUndefElts = UndefElements.count();
7916 if (!Ld || (NumElts - NumUndefElts) <= 1) {
7917 APInt SplatValue, Undef;
7918 unsigned SplatBitSize;
7920 // Check if this is a repeated constant pattern suitable for broadcasting.
7921 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
7922 SplatBitSize > VT.getScalarSizeInBits() &&
7923 SplatBitSize < VT.getSizeInBits()) {
7924 // Avoid replacing with broadcast when it's a use of a shuffle
7925 // instruction to preserve the present custom lowering of shuffles.
7926 if (isFoldableUseOfShuffle(BVOp))
7928 // Replace the BUILD_VECTOR with a broadcast of the repeated constants.
7929 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7930 LLVMContext *Ctx = DAG.getContext();
7931 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
7932 if (Subtarget.hasAVX()) {
7933 if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
7934 !(SplatBitSize == 64 && Subtarget.is32Bit())) {
7935 // Splatted value can fit in one INTEGER constant in constant pool.
7936 // Load the constant and broadcast it.
7937 MVT CVT = MVT::getIntegerVT(SplatBitSize);
7938 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
7939 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
7940 SDValue CP = DAG.getConstantPool(C, PVT);
7941 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
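// For example, a v8i32 build_vector repeating a 64-bit constant pattern
// uses CVT == i64 and Repeat == 4: the i64 is loaded from the constant
// pool, broadcast to v4i64 and bitcast back to v8i32.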
7943 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
7945 CVT, dl, DAG.getEntryNode(), CP,
7946 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
7948 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
7949 MVT::getVectorVT(CVT, Repeat), Ld);
7950 return DAG.getBitcast(VT, Brdcst);
7951 } else if (SplatBitSize == 32 || SplatBitSize == 64) {
7952 // Splatted value can fit in one FLOAT constant in constant pool.
7953 // Load the constant and broadcast it.
7954 // AVX has support for 32- and 64-bit broadcasts of floats only.
7955 // No 64-bit integers on a 32-bit subtarget.
7956 MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
7957 // Lower the splat via APFloat directly, to avoid any conversion.
7960 ? ConstantFP::get(*Ctx,
7961 APFloat(APFloat::IEEEsingle(), SplatValue))
7962 : ConstantFP::get(*Ctx,
7963 APFloat(APFloat::IEEEdouble(), SplatValue));
7964 SDValue CP = DAG.getConstantPool(C, PVT);
7965 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
7967 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
7969 CVT, dl, DAG.getEntryNode(), CP,
7970 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
7972 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
7973 MVT::getVectorVT(CVT, Repeat), Ld);
7974 return DAG.getBitcast(VT, Brdcst);
7975 } else if (SplatBitSize > 64) {
7976 // Load the vector of constants and broadcast it.
7977 MVT CVT = VT.getScalarType();
7978 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
7980 SDValue VCP = DAG.getConstantPool(VecC, PVT);
7981 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
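// For example, a v8f32 build_vector whose constants repeat every 128 bits
// loads a v4f32 constant vector (NumElm == 4) and subvector-broadcasts it
// to the full 256-bit result.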
7982 unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
7984 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
7985 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
7987 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
7988 return DAG.getBitcast(VT, Brdcst);
7993 // If we are moving a scalar into a vector (Ld must be set and all elements
7994 // but 1 are undef) and that operation is not obviously supported by
7995 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
7996 // That's better than general shuffling and may eliminate a load to GPR and
7997 // move from scalar to vector register.
7998 if (!Ld || NumElts - NumUndefElts != 1)
8000 unsigned ScalarSize = Ld.getValueSizeInBits();
8001 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8005 bool ConstSplatVal =
8006 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8008 // Make sure that all of the users of a non-constant load are from the
8009 // BUILD_VECTOR node.
8010 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
8013 unsigned ScalarSize = Ld.getValueSizeInBits();
8014 bool IsGE256 = (VT.getSizeInBits() >= 256);
8016 // When optimizing for size, generate up to 5 extra bytes for a broadcast
8017 // instruction to save 8 or more bytes of constant pool data.
8018 // TODO: If multiple splats are generated to load the same constant,
8019 // it may be detrimental to overall size. There needs to be a way to detect
8020 // that condition to know if this is truly a size win.
8021 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
8023 // Handle broadcasting a single constant scalar from the constant pool
8025 // On Sandybridge (no AVX2), it is still better to load a constant vector
8026 // from the constant pool and not to broadcast it from a scalar.
8027 // But override that restriction when optimizing for size.
8028 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8029 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8030 EVT CVT = Ld.getValueType();
8031 assert(!CVT.isVector() && "Must not broadcast a vector type");
8033 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8034 // For size optimization, also splat v2f64 and v2i64, and for size opt
8035 // with AVX2, also splat i8 and i16.
8036 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8037 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8038 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8039 const Constant *C = nullptr;
8040 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8041 C = CI->getConstantIntValue();
8042 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8043 C = CF->getConstantFPValue();
8045 assert(C && "Invalid constant type");
8047 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8049 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8050 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8052 CVT, dl, DAG.getEntryNode(), CP,
8053 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8056 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8060 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8062 // Handle AVX2 in-register broadcasts.
8063 if (!IsLoad && Subtarget.hasInt256() &&
8064 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8065 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8067 // The scalar source must be a normal load.
8071 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8072 (Subtarget.hasVLX() && ScalarSize == 64))
8073 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8075 // The integer check is needed for the 64-bit scalar into 128-bit vector case,
8076 // so that it doesn't match double, since there is no vbroadcastsd xmm.
8077 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8078 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8079 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8082 // Unsupported broadcast.
8086 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
8087 /// underlying vector and index.
8089 /// Modifies \p ExtractedFromVec to the real vector and returns the real
8091 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8093 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8094 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8097 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8098 // lowered this:
8099 //   (extract_vector_elt (v8f32 %1), Constant<6>)
8100 // into:
8101 //   (extract_vector_elt (vector_shuffle<2,u,u,u>
8102 //                            (extract_subvector (v8f32 %0), Constant<4>),
8103 //                            undef),
8104 //                        Constant<0>)
8105 // In this case the vector is the extract_subvector expression and the index
8106 // is 2, as specified by the shuffle.
8107 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8108 SDValue ShuffleVec = SVOp->getOperand(0);
8109 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8110 assert(ShuffleVecVT.getVectorElementType() ==
8111 ExtractedFromVec.getSimpleValueType().getVectorElementType());
8113 int ShuffleIdx = SVOp->getMaskElt(Idx);
8114 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8115 ExtractedFromVec = ShuffleVec;
8121 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8122 MVT VT = Op.getSimpleValueType();
8124 // Skip if insert_vec_elt is not supported.
8125 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8126 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8130 unsigned NumElems = Op.getNumOperands();
8134 SmallVector<unsigned, 4> InsertIndices;
8135 SmallVector<int, 8> Mask(NumElems, -1);
8137 for (unsigned i = 0; i != NumElems; ++i) {
8138 unsigned Opc = Op.getOperand(i).getOpcode();
8140 if (Opc == ISD::UNDEF)
8143 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8144 // Quit if more than one element needs inserting.
8145 if (InsertIndices.size() > 1)
8148 InsertIndices.push_back(i);
8152 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8153 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8155 // Quit if non-constant index.
8156 if (!isa<ConstantSDNode>(ExtIdx))
8158 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8160 // Quit if extracted from vector of different type.
8161 if (ExtractedFromVec.getValueType() != VT)
8164 if (!VecIn1.getNode())
8165 VecIn1 = ExtractedFromVec;
8166 else if (VecIn1 != ExtractedFromVec) {
8167 if (!VecIn2.getNode())
8168 VecIn2 = ExtractedFromVec;
8169 else if (VecIn2 != ExtractedFromVec)
8170 // Quit if more than 2 vectors to shuffle
8174 if (ExtractedFromVec == VecIn1)
8176 else if (ExtractedFromVec == VecIn2)
8177 Mask[i] = Idx + NumElems;
8180 if (!VecIn1.getNode())
8183 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8184 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8186 for (unsigned Idx : InsertIndices)
8187 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8188 DAG.getIntPtrConstant(Idx, DL));
8193 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8194 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8195 Op.getScalarValueSizeInBits() == 1 &&
8196 "Can not convert non-constant vector");
8197 uint64_t Immediate = 0;
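// Pack the constant i1 operands into an integer immediate, one bit per
// element; e.g. operands <1,0,1,1> produce the immediate 0b1101.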
8198 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8199 SDValue In = Op.getOperand(idx);
8201 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8204 MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8205 return DAG.getConstant(Immediate, dl, VT);
8207 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8208 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8209 const X86Subtarget &Subtarget) {
8211 MVT VT = Op.getSimpleValueType();
8212 assert((VT.getVectorElementType() == MVT::i1) &&
8213 "Unexpected type in LowerBUILD_VECTORvXi1!");
8216 if (ISD::isBuildVectorAllZeros(Op.getNode()))
8219 if (ISD::isBuildVectorAllOnes(Op.getNode()))
8222 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
8223 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
8224 // Split the pieces.
8226 DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32));
8228 DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32));
8229 // We have to manually lower both halves so getNode doesn't try to
8230 // reassemble the build_vector.
8231 Lower = LowerBUILD_VECTORvXi1(Lower, DAG, Subtarget);
8232 Upper = LowerBUILD_VECTORvXi1(Upper, DAG, Subtarget);
8233 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper);
8235 SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
8236 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
8237 return DAG.getBitcast(VT, Imm);
8238 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
8239 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
8240 DAG.getIntPtrConstant(0, dl));
8243 // Vector has one or more non-const elements
8244 uint64_t Immediate = 0;
8245 SmallVector<unsigned, 16> NonConstIdx;
8246 bool IsSplat = true;
8247 bool HasConstElts = false;
8249 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8250 SDValue In = Op.getOperand(idx);
8253 if (!isa<ConstantSDNode>(In))
8254 NonConstIdx.push_back(idx);
8256 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8257 HasConstElts = true;
8261 else if (In != Op.getOperand(SplatIdx))
8265 // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
8267 return DAG.getSelect(dl, VT, Op.getOperand(SplatIdx),
8268 DAG.getConstant(1, dl, VT),
8269 DAG.getConstant(0, dl, VT));
8271 // Insert the non-constant elements one by one.
8275 MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
8276 Imm = DAG.getConstant(Immediate, dl, ImmVT);
8278 else if (HasConstElts)
8279 Imm = DAG.getConstant(0, dl, VT);
8281 Imm = DAG.getUNDEF(VT);
8282 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
8283 DstVec = DAG.getBitcast(VT, Imm);
8285 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
8286 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
8287 DAG.getIntPtrConstant(0, dl));
8290 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8291 unsigned InsertIdx = NonConstIdx[i];
8292 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8293 Op.getOperand(InsertIdx),
8294 DAG.getIntPtrConstant(InsertIdx, dl));
8299 /// This is a helper function of LowerToHorizontalOp().
8300 /// This function checks that the build_vector \p N in input implements a
8301 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8302 /// may not match the layout of an x86 256-bit horizontal instruction.
8303 /// In other words, if this returns true, then some extraction/insertion will
8304 /// be required to produce a valid horizontal instruction.
8306 /// Parameter \p Opcode defines the kind of horizontal operation to match.
8307 /// For example, if \p Opcode is equal to ISD::ADD, then this function
8308 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8309 /// is equal to ISD::SUB, then this function checks if this is a horizontal
8312 /// This function only analyzes elements of \p N whose indices are
8313 /// in range [BaseIdx, LastIdx).
8315 /// TODO: This function was originally used to match both real and fake partial
8316 /// horizontal operations, but the index-matching logic is incorrect for that.
8317 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
8318 /// code because it is only used for partial h-op matching now?
8319 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8321 unsigned BaseIdx, unsigned LastIdx,
8322 SDValue &V0, SDValue &V1) {
8323 EVT VT = N->getValueType(0);
8324 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8325 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8326 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8327 "Invalid Vector in input!");
8329 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8330 bool CanFold = true;
8331 unsigned ExpectedVExtractIdx = BaseIdx;
8332 unsigned NumElts = LastIdx - BaseIdx;
8333 V0 = DAG.getUNDEF(VT);
8334 V1 = DAG.getUNDEF(VT);
8336 // Check if N implements a horizontal binop.
8337 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8338 SDValue Op = N->getOperand(i + BaseIdx);
8341 if (Op->isUndef()) {
8342 // Update the expected vector extract index.
8343 if (i * 2 == NumElts)
8344 ExpectedVExtractIdx = BaseIdx;
8345 ExpectedVExtractIdx += 2;
8349 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8354 SDValue Op0 = Op.getOperand(0);
8355 SDValue Op1 = Op.getOperand(1);
8357 // Try to match the following pattern:
8358 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8359 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8360 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8361 Op0.getOperand(0) == Op1.getOperand(0) &&
8362 isa<ConstantSDNode>(Op0.getOperand(1)) &&
8363 isa<ConstantSDNode>(Op1.getOperand(1)));
8367 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8368 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8370 if (i * 2 < NumElts) {
8372 V0 = Op0.getOperand(0);
8373 if (V0.getValueType() != VT)
8378 V1 = Op0.getOperand(0);
8379 if (V1.getValueType() != VT)
8382 if (i * 2 == NumElts)
8383 ExpectedVExtractIdx = BaseIdx;
8386 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8387 if (I0 == ExpectedVExtractIdx)
8388 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8389 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8390 // Try to match the following dag sequence:
8391 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8392 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8396 ExpectedVExtractIdx += 2;
8402 /// Emit a sequence of two 128-bit horizontal add/sub followed by
8403 /// a concat_vector.
8405 /// This is a helper function of LowerToHorizontalOp().
8406 /// This function expects two 256-bit vectors called V0 and V1.
8407 /// At first, each vector is split into two separate 128-bit vectors.
8408 /// Then, the resulting 128-bit vectors are used to implement two
8409 /// horizontal binary operations.
8411 /// The kind of horizontal binary operation is defined by \p X86Opcode.
8413 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
8414 /// the two new horizontal binops.
8415 /// When Mode is set, the first horizontal binop dag node would take as input
8416 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
8417 /// horizontal binop dag node would take as input the lower 128-bit of V1
8418 /// and the upper 128-bit of V1.
8420 /// HADD V0_LO, V0_HI
8421 /// HADD V1_LO, V1_HI
8423 /// Otherwise, the first horizontal binop dag node takes as input the lower
8424 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
8425 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
8427 /// HADD V0_LO, V1_LO
8428 /// HADD V0_HI, V1_HI
8430 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8431 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8432 /// the upper 128-bits of the result.
8433 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8434 const SDLoc &DL, SelectionDAG &DAG,
8435 unsigned X86Opcode, bool Mode,
8436 bool isUndefLO, bool isUndefHI) {
8437 MVT VT = V0.getSimpleValueType();
8438 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8439 "Invalid nodes in input!");
8441 unsigned NumElts = VT.getVectorNumElements();
8442 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8443 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8444 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8445 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8446 MVT NewVT = V0_LO.getSimpleValueType();
8448 SDValue LO = DAG.getUNDEF(NewVT);
8449 SDValue HI = DAG.getUNDEF(NewVT);
8452 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8453 if (!isUndefLO && !V0->isUndef())
8454 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8455 if (!isUndefHI && !V1->isUndef())
8456 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
8458 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8459 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
8460 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
8462 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
8463 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
8466 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
8469 /// Returns true iff \p BV builds a vector with the result equivalent to
8470 /// the result of an ADDSUB/SUBADD operation.
8471 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
8472 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
8473 /// \p Opnd0 and \p Opnd1.
8474 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
8475 const X86Subtarget &Subtarget, SelectionDAG &DAG,
8476 SDValue &Opnd0, SDValue &Opnd1,
8477 unsigned &NumExtracts,
8480 MVT VT = BV->getSimpleValueType(0);
8481 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
8484 unsigned NumElts = VT.getVectorNumElements();
8485 SDValue InVec0 = DAG.getUNDEF(VT);
8486 SDValue InVec1 = DAG.getUNDEF(VT);
8490 // Odd-numbered elements in the input build vector are obtained from
8491 // adding/subtracting two integer/float elements.
8492 // Even-numbered elements in the input build vector are obtained from
8493 // subtracting/adding two integer/float elements.
8494 unsigned Opc[2] = {0, 0};
8495 for (unsigned i = 0, e = NumElts; i != e; ++i) {
8496 SDValue Op = BV->getOperand(i);
8498 // Skip 'undef' values.
8499 unsigned Opcode = Op.getOpcode();
8500 if (Opcode == ISD::UNDEF)
8503 // Early exit if we found an unexpected opcode.
8504 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
8507 SDValue Op0 = Op.getOperand(0);
8508 SDValue Op1 = Op.getOperand(1);
8510 // Try to match the following pattern:
8511 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
8512 // Early exit if we cannot match that sequence.
8513 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8514 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8515 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8516 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
8517 Op0.getOperand(1) != Op1.getOperand(1))
8520 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8524 // We found a valid add/sub node; make sure it's the same opcode as the
8525 // previous elements for this parity.
8526 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
8528 Opc[i % 2] = Opcode;
8530 // Update InVec0 and InVec1.
8531 if (InVec0.isUndef()) {
8532 InVec0 = Op0.getOperand(0);
8533 if (InVec0.getSimpleValueType() != VT)
8536 if (InVec1.isUndef()) {
8537 InVec1 = Op1.getOperand(0);
8538 if (InVec1.getSimpleValueType() != VT)
8542 // Make sure that operands in input to each add/sub node always
8543 // come from a same pair of vectors.
8544 if (InVec0 != Op0.getOperand(0)) {
8545 if (Opcode == ISD::FSUB)
8548 // FADD is commutable. Try to commute the operands
8549 // and then test again.
8550 std::swap(Op0, Op1);
8551 if (InVec0 != Op0.getOperand(0))
8555 if (InVec1 != Op1.getOperand(0))
8558 // Increment the number of extractions done.
8562 // Ensure we have found an opcode for both parities and that they are
8563 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
8564 // inputs are undef.
8565 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
8566 InVec0.isUndef() || InVec1.isUndef())
8569 IsSubAdd = Opc[0] == ISD::FADD;
8576 /// Returns true if it is possible to fold MUL and an idiom that has already been
8577 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
8578 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
8579 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
8581 /// Prior to calling this function it should be known that there is some
8582 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
8583 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
8584 /// before replacement of such SDNode with ADDSUB operation. Thus the number
8585 /// of \p Opnd0 uses is expected to be equal to 2.
8586 /// For example, this function may be called for the following IR:
8587 /// %AB = fmul fast <2 x double> %A, %B
8588 /// %Sub = fsub fast <2 x double> %AB, %C
8589 /// %Add = fadd fast <2 x double> %AB, %C
8590 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
8591 /// <2 x i32> <i32 0, i32 3>
8592 /// There is a def for %Addsub here, which potentially can be replaced by
8593 /// X86ISD::ADDSUB operation:
8594 /// %Addsub = X86ISD::ADDSUB %AB, %C
8595 /// and such ADDSUB can further be replaced with FMADDSUB:
8596 /// %Addsub = FMADDSUB %A, %B, %C.
8598 /// The main reason why this method is called before the replacement of the
8599 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
8600 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
8602 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
8604 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
8605 unsigned ExpectedUses) {
8606 if (Opnd0.getOpcode() != ISD::FMUL ||
8607 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
8610 // FIXME: These checks must match the similar ones in
8611 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
8612 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
8613 // or MUL + ADDSUB to FMADDSUB.
8614 const TargetOptions &Options = DAG.getTarget().Options;
8616 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
8621 Opnd1 = Opnd0.getOperand(1);
8622 Opnd0 = Opnd0.getOperand(0);
8627 /// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
8628 /// 'fsubadd' operation into an X86ISD::ADDSUB or X86ISD::FMADDSUB or
8629 /// X86ISD::FMSUBADD node.
8630 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
8631 const X86Subtarget &Subtarget,
8632 SelectionDAG &DAG) {
8633 SDValue Opnd0, Opnd1;
8634 unsigned NumExtracts;
8636 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
8640 MVT VT = BV->getSimpleValueType(0);
8643 // Try to generate X86ISD::FMADDSUB node here.
8645 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
8646 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
8647 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
8650 // We only support ADDSUB.
8654 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
8655 // the ADDSUB idiom has been successfully recognized. There are no known
8656 // X86 targets with 512-bit ADDSUB instructions!
8657 // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
8659 if (VT.is512BitVector())
8662 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
8665 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
8666 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
8667 // Initialize outputs to known values.
8668 MVT VT = BV->getSimpleValueType(0);
8669 HOpcode = ISD::DELETED_NODE;
8670 V0 = DAG.getUNDEF(VT);
8671 V1 = DAG.getUNDEF(VT);
8673 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
8674 // half of the result is calculated independently from the 128-bit halves of
8675 // the inputs, so that makes the index-checking logic below more complicated.
8676 unsigned NumElts = VT.getVectorNumElements();
8677 unsigned GenericOpcode = ISD::DELETED_NODE;
8678 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
8679 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
8680 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
8681 for (unsigned i = 0; i != Num128BitChunks; ++i) {
8682 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
8683 // Ignore undef elements.
8684 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
8688 // If there's an opcode mismatch, we're done.
8689 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
8692 // Initialize horizontal opcode.
8693 if (HOpcode == ISD::DELETED_NODE) {
8694 GenericOpcode = Op.getOpcode();
8695 switch (GenericOpcode) {
8696 case ISD::ADD: HOpcode = X86ISD::HADD; break;
8697 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
8698 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
8699 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
8700 default: return false;
8704 SDValue Op0 = Op.getOperand(0);
8705 SDValue Op1 = Op.getOperand(1);
8706 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8707 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8708 Op0.getOperand(0) != Op1.getOperand(0) ||
8709 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8710 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8713 // The source vector is chosen based on which 64-bit half of the
8714 // destination vector is being calculated.
8715 if (j < NumEltsIn64Bits) {
8717 V0 = Op0.getOperand(0);
8720 V1 = Op0.getOperand(0);
8723 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8724 if (SourceVec != Op0.getOperand(0))
8727 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
8728 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
8729 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
8730 unsigned ExpectedIndex = i * NumEltsIn128Bits +
8731 (j % NumEltsIn64Bits) * 2;
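// For example, for a v8i32 HADD, result element 5 (i == 1, j == 1) must be
// the sum of elements 6 and 7 of the V0 source vector.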
8732 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
8735 // If this is not a commutative op, this does not match.
8736 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
8739 // Addition is commutative, so try swapping the extract indexes.
8740 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
8741 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
8744 // Extract indexes do not match horizontal requirement.
8748 // We matched. Opcode and operands are returned by reference as arguments.
8752 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
8753 SelectionDAG &DAG, unsigned HOpcode,
8754 SDValue V0, SDValue V1) {
8755 // If either input vector is not the same size as the build vector,
8756 // extract/insert the low bits to the correct size.
8757 // This is free (examples: zmm --> xmm, xmm --> ymm).
8758 MVT VT = BV->getSimpleValueType(0);
8759 unsigned Width = VT.getSizeInBits();
8760 if (V0.getValueSizeInBits() > Width)
8761 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
8762 else if (V0.getValueSizeInBits() < Width)
8763 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
8765 if (V1.getValueSizeInBits() > Width)
8766 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
8767 else if (V1.getValueSizeInBits() < Width)
8768 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
8770 unsigned NumElts = VT.getVectorNumElements();
8771 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
8772 for (unsigned i = 0; i != NumElts; ++i)
8773 if (BV->getOperand(i).isUndef())
8774 DemandedElts.clearBit(i);
8776 // If we don't need the upper xmm, then perform as a xmm hop.
8777 unsigned HalfNumElts = NumElts / 2;
8778 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
8779 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), HalfNumElts);
8780 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
8781 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
8782 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
8783 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
8786 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
8789 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
8790 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
8791 const X86Subtarget &Subtarget,
8792 SelectionDAG &DAG) {
8793 // We need at least 2 non-undef elements to make this worthwhile by default.
8794 unsigned NumNonUndefs =
8795 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
8796 if (NumNonUndefs < 2)
8799 // There are 4 sets of horizontal math operations distinguished by type:
8800 // int/FP at 128-bit/256-bit. Each type was introduced with a different
8801 // subtarget feature. Try to match those "native" patterns first.
8802 MVT VT = BV->getSimpleValueType(0);
8803 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
8804 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
8805 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
8806 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
8809 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
8810 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
8813 // Try harder to match 256-bit ops by using extract/concat.
8814 if (!Subtarget.hasAVX() || !VT.is256BitVector())
8817 // Count the number of UNDEF operands in the input build_vector.
8818 unsigned NumElts = VT.getVectorNumElements();
8819 unsigned Half = NumElts / 2;
8820 unsigned NumUndefsLO = 0;
8821 unsigned NumUndefsHI = 0;
8822 for (unsigned i = 0, e = Half; i != e; ++i)
8823 if (BV->getOperand(i)->isUndef())
8826 for (unsigned i = Half, e = NumElts; i != e; ++i)
8827 if (BV->getOperand(i)->isUndef())
8831 SDValue InVec0, InVec1;
8832 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
8833 SDValue InVec2, InVec3;
8835 bool CanFold = true;
8837 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
8838 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
8840 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8841 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8842 X86Opcode = X86ISD::HADD;
8843 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
8845 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
8847 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8848 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8849 X86Opcode = X86ISD::HSUB;
8854 // Do not try to expand this build_vector into a pair of horizontal
8855 // add/sub if we can emit a pair of scalar add/sub.
8856 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8859 // Convert this build_vector into a pair of horizontal binops followed by
8860 // a concat vector. We must adjust the outputs from the partial horizontal
8861 // matching calls above to account for undefined vector halves.
8862 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
8863 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
8864 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
8865 bool isUndefLO = NumUndefsLO == Half;
8866 bool isUndefHI = NumUndefsHI == Half;
8867 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
8872 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
8873 VT == MVT::v16i16) {
8875 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
8876 X86Opcode = X86ISD::HADD;
8877 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
8879 X86Opcode = X86ISD::HSUB;
8880 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
8882 X86Opcode = X86ISD::FHADD;
8883 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
8885 X86Opcode = X86ISD::FHSUB;
8889 // Don't try to expand this build_vector into a pair of horizontal add/sub
8890 // if we can simply emit a pair of scalar add/sub.
8891 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8894 // Convert this build_vector into two horizontal add/sub followed by
8896 bool isUndefLO = NumUndefsLO == Half;
8897 bool isUndefHI = NumUndefsHI == Half;
8898 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
8899 isUndefLO, isUndefHI);
8905 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
8906 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
8907 /// just apply the bit to the vectors.
8908 /// NOTE: It's not in our interest to start building a general-purpose vectorizer
8909 /// from this, but enough scalar bit operations are created from the later
8910 /// legalization + scalarization stages to need basic support.
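/// For example, (build_vector (and a, 1), (and b, 3), (and c, 5), (and d, 7))
/// becomes (and (build_vector a, b, c, d), (build_vector 1, 3, 5, 7)).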
8911 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
8912 SelectionDAG &DAG) {
8914 MVT VT = Op->getSimpleValueType(0);
8915 unsigned NumElems = VT.getVectorNumElements();
8916 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8918 // Check that all elements have the same opcode.
8919 // TODO: Should we allow UNDEFS and if so how many?
8920 unsigned Opcode = Op->getOperand(0).getOpcode();
8921 for (unsigned i = 1; i < NumElems; ++i)
8922 if (Opcode != Op->getOperand(i).getOpcode())
8925 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
8926 bool IsShift = false;
8938 // Don't do this if the buildvector is a splat - we'd replace one
8939 // constant with an entire vector.
8940 if (Op->getSplatValue())
8942 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
8947 SmallVector<SDValue, 4> LHSElts, RHSElts;
8948 for (SDValue Elt : Op->ops()) {
8949 SDValue LHS = Elt.getOperand(0);
8950 SDValue RHS = Elt.getOperand(1);
8952 // We expect the canonicalized RHS operand to be the constant.
8953 if (!isa<ConstantSDNode>(RHS))
8956 // Extend shift amounts.
8957 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
8960 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
8963 LHSElts.push_back(LHS);
8964 RHSElts.push_back(RHS);
8967 // Limit to shifts by uniform immediates.
8968 // TODO: Only accept vXi8/vXi64 special cases?
8969 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
8970 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
8973 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
8974 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
8975 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
8978 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
8979 /// functionality to do this, so it's all zeros, all ones, or some derivation
8980 /// that is cheap to calculate.
8981 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
8982 const X86Subtarget &Subtarget) {
8984 MVT VT = Op.getSimpleValueType();
8986 // Vectors containing all zeros can be matched by pxor and xorps.
8987 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
8988 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
8989 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
8990 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
8993 return getZeroVector(VT, Subtarget, DAG, DL);
8996 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
8997 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
8998 // vpcmpeqd on 256-bit vectors.
8999 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9000 if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
9001 (VT == MVT::v8i32 && Subtarget.hasInt256()))
9004 return getOnesVector(VT, DAG, DL);
9010 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9011 /// from a vector of source values and a vector of extraction indices.
9012 /// The vectors might be manipulated to match the type of the permute op.
9013 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9014 SDLoc &DL, SelectionDAG &DAG,
9015 const X86Subtarget &Subtarget) {
9017 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9018 unsigned NumElts = VT.getVectorNumElements();
9019 unsigned SizeInBits = VT.getSizeInBits();
9021 // Adjust IndicesVec to match VT size.
9022 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9023 "Illegal variable permute mask size");
9024 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9025 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9026 NumElts * VT.getScalarSizeInBits());
9027 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9029 // Handle a SrcVec that doesn't match the VT size.
9030 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9031 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9032 // Handle larger SrcVec by treating it as a larger permute.
9033 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9034 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9035 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9036 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9037 Subtarget, DAG, SDLoc(IndicesVec));
9038 return extractSubVector(
9039 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9040 DAG, DL, SizeInBits);
9041 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9042 // Widen smaller SrcVec to match VT.
9043 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9048 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9049 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9050 EVT SrcVT = Idx.getValueType();
9051 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9052 uint64_t IndexScale = 0;
9053 uint64_t IndexOffset = 0;
9055 // If we're scaling a smaller permute op, then we need to repeat the
9056 // indices, scaling and offsetting them as well.
9057 // e.g. v4i32 -> v16i8 (Scale = 4)
9058 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9059 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9060 for (uint64_t i = 0; i != Scale; ++i) {
9061 IndexScale |= Scale << (i * NumDstBits);
9062 IndexOffset |= i << (i * NumDstBits);
9065 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9066 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9067 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9068 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9072 unsigned Opcode = 0;
9073 switch (VT.SimpleTy) {
9077 if (Subtarget.hasSSSE3())
9078 Opcode = X86ISD::PSHUFB;
9081 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9082 Opcode = X86ISD::VPERMV;
9083 else if (Subtarget.hasSSSE3()) {
9084 Opcode = X86ISD::PSHUFB;
9085 ShuffleVT = MVT::v16i8;
9090 if (Subtarget.hasAVX()) {
9091 Opcode = X86ISD::VPERMILPV;
9092 ShuffleVT = MVT::v4f32;
9093 } else if (Subtarget.hasSSSE3()) {
9094 Opcode = X86ISD::PSHUFB;
9095 ShuffleVT = MVT::v16i8;
9100 if (Subtarget.hasAVX()) {
9101 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9102 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9103 Opcode = X86ISD::VPERMILPV;
9104 ShuffleVT = MVT::v2f64;
9105 } else if (Subtarget.hasSSE41()) {
9106 // SSE41 can compare v2i64 - select between indices 0 and 1.
9107 return DAG.getSelectCC(
9109 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9110 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9111 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9112 ISD::CondCode::SETEQ);
9116 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9117 Opcode = X86ISD::VPERMV;
9118 else if (Subtarget.hasXOP()) {
9119 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9120 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9121 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9122 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9124 ISD::CONCAT_VECTORS, DL, VT,
9125 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9126 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9127 } else if (Subtarget.hasAVX()) {
9128 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9129 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9130 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9131 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9132 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9133 ArrayRef<SDValue> Ops) {
9134 // Permute Lo and Hi and then select based on index range.
9135 // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9136 // care about bit[7] as it's just an index vector.
9137 SDValue Idx = Ops[2];
9138 EVT VT = Idx.getValueType();
9139 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9140 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9141 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9142 ISD::CondCode::SETGT);
9144 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9145 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9150 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9151 Opcode = X86ISD::VPERMV;
9152 else if (Subtarget.hasAVX()) {
9153 // Scale to v32i8 and perform as v32i8.
9154 IndicesVec = ScaleIndices(IndicesVec, 2);
9155 return DAG.getBitcast(
9156 VT, createVariablePermute(
9157 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9158 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9163 if (Subtarget.hasAVX2())
9164 Opcode = X86ISD::VPERMV;
9165 else if (Subtarget.hasAVX()) {
9166 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9167 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9168 {0, 1, 2, 3, 0, 1, 2, 3});
9169 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9170 {4, 5, 6, 7, 4, 5, 6, 7});
9171 if (Subtarget.hasXOP())
9172 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32,
9173 LoLo, HiHi, IndicesVec,
9174 DAG.getConstant(0, DL, MVT::i8)));
9175 // Permute Lo and Hi and then select based on index range.
9176 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9177 SDValue Res = DAG.getSelectCC(
9178 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9179 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9180 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9181 ISD::CondCode::SETGT);
9182 return DAG.getBitcast(VT, Res);
9187 if (Subtarget.hasAVX512()) {
9188 if (!Subtarget.hasVLX()) {
9189 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9190 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9192 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9193 DAG, SDLoc(IndicesVec));
9194 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9196 return extract256BitVector(Res, 0, DAG, DL);
9198 Opcode = X86ISD::VPERMV;
9199 } else if (Subtarget.hasAVX()) {
9200 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9202 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9204 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9205 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9206 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9207 if (Subtarget.hasXOP())
9208 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64,
9209 LoLo, HiHi, IndicesVec,
9210 DAG.getConstant(0, DL, MVT::i8)));
9211 // Permute Lo and Hi and then select based on index range.
9212 // This works as VPERMILPD only uses index bit[1] to permute elements.
9213 SDValue Res = DAG.getSelectCC(
9214 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9215 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9216 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9217 ISD::CondCode::SETGT);
9218 return DAG.getBitcast(VT, Res);
9222 if (Subtarget.hasVBMI())
9223 Opcode = X86ISD::VPERMV;
9226 if (Subtarget.hasBWI())
9227 Opcode = X86ISD::VPERMV;
9233 if (Subtarget.hasAVX512())
9234 Opcode = X86ISD::VPERMV;
9240 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9241 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9242 "Illegal variable permute shuffle type");
9244 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
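// For example, performing a v4i32 permute as a v16i8 PSHUFB has Scale == 4:
// each i32 index is multiplied by 4 and then repeated/offset for each of
// its four bytes.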
9246 IndicesVec = ScaleIndices(IndicesVec, Scale);
9248 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9249 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9251 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
9252 SDValue Res = Opcode == X86ISD::VPERMV
9253 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9254 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9255 return DAG.getBitcast(VT, Res);
9258 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9259 // reasoned to be a permutation of a vector by indices in a non-constant vector.
9260 // (build_vector (extract_elt V, (extract_elt I, 0)),
9261 // (extract_elt V, (extract_elt I, 1)),
9266 // TODO: Handle undefs
9267 // TODO: Utilize pshufb and zero mask blending to support more efficient
9268 // construction of vectors with constant-0 elements.
9270 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9271 const X86Subtarget &Subtarget) {
9272 SDValue SrcVec, IndicesVec;
9273 // Check for a match of the permute source vector and permute index elements.
9274 // This is done by checking that the i-th build_vector operand is of the form:
9275 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9276 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9277 SDValue Op = V.getOperand(Idx);
9278 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9281 // If this is the first extract encountered in V, set the source vector,
9282 // otherwise verify the extract is from the previously defined source
9285 SrcVec = Op.getOperand(0);
9286 else if (SrcVec != Op.getOperand(0))
9288 SDValue ExtractedIndex = Op->getOperand(1);
9289 // Peek through extends.
9290 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9291 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9292 ExtractedIndex = ExtractedIndex.getOperand(0);
9293 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9296 // If this is the first extract from the index vector candidate, set the
9297 // indices vector, otherwise verify the extract is from the previously
9298 // defined indices vector.
9300 IndicesVec = ExtractedIndex.getOperand(0);
9301 else if (IndicesVec != ExtractedIndex.getOperand(0))
9304 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9305 if (!PermIdx || PermIdx->getZExtValue() != Idx)
9310 MVT VT = V.getSimpleValueType();
9311 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9315 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9318 MVT VT = Op.getSimpleValueType();
9319 MVT EltVT = VT.getVectorElementType();
9320 unsigned NumElems = Op.getNumOperands();
9322 // Generate vectors for predicate vectors.
9323 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9324 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9326 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9327 return VectorConstant;
9329 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9330 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9332 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9333 return HorizontalOp;
9334 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9336 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9339 unsigned EVTBits = EltVT.getSizeInBits();
9341 unsigned NumZero = 0;
9342 unsigned NumNonZero = 0;
9343 uint64_t NonZeros = 0;
9344 bool IsAllConstants = true;
9345 SmallSet<SDValue, 8> Values;
9346 unsigned NumConstants = NumElems;
9347 for (unsigned i = 0; i < NumElems; ++i) {
9348 SDValue Elt = Op.getOperand(i);
9352 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9353 IsAllConstants = false;
9356 if (X86::isZeroNode(Elt))
9359 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9360 NonZeros |= ((uint64_t)1 << i);
9365 // All undef vector. Return an UNDEF. All zero vectors were handled above.
9366 if (NumNonZero == 0)
9367 return DAG.getUNDEF(VT);
9369 // If we are inserting one variable into a vector of non-zero constants, try
9370 // to avoid loading each constant element as a scalar. Load the constants as a
9371 // vector and then insert the variable scalar element. If insertion is not
9372 // supported, fall back to a shuffle to get the scalar blended with the
9373 // constants. Insertion into a zero vector is handled as a special-case
9374 // somewhere below here.
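// For example, lowering (build_vector 1, %x, 3, 4) loads the constant vector
// <1, undef, 3, 4> from the constant pool and then inserts %x at index 1,
// rather than scalar-loading each constant individually.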
9375 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9376 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9377 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9378 // Create an all-constant vector. The variable element in the old
9379 // build vector is replaced by undef in the constant vector. Save the
9380 // variable scalar element and its index for use in the insertelement.
9381 LLVMContext &Context = *DAG.getContext();
9382 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9383 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9386 for (unsigned i = 0; i != NumElems; ++i) {
9387 SDValue Elt = Op.getOperand(i);
9388 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9389 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9390 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9391 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9392 else if (!Elt.isUndef()) {
9393 assert(!VarElt.getNode() && !InsIndex.getNode() &&
9394 "Expected one variable element in this vector");
9396 InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9399 Constant *CV = ConstantVector::get(ConstVecOps);
9400 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9402 // The constants we just created may not be legal (eg, floating point). We
9403 // must lower the vector right here because we can not guarantee that we'll
9404 // legalize it before loading it. This is also why we could not just create
9405 // a new build vector here. If the build vector contains illegal constants,
9406 // it could get split back up into a series of insert elements.
9407 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9408 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9409 MachineFunction &MF = DAG.getMachineFunction();
9410 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9411 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9412 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9413 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9414 if (InsertC < NumEltsInLow128Bits)
9415 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9417 // There's no good way to insert into the high elements of a >128-bit
9418 // vector, so use shuffles to avoid an extract/insert sequence.
9419 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9420 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9421 SmallVector<int, 8> ShuffleMask;
9422 unsigned NumElts = VT.getVectorNumElements();
9423 for (unsigned i = 0; i != NumElts; ++i)
9424 ShuffleMask.push_back(i == InsertC ? NumElts : i);
9425 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9426 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9429 // Special case for single non-zero, non-undef, element.
9430 if (NumNonZero == 1) {
9431 unsigned Idx = countTrailingZeros(NonZeros);
9432 SDValue Item = Op.getOperand(Idx);
9434 // If we have a constant or non-constant insertion into the low element of
9435 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9436 // the rest of the elements. This will be matched as movd/movq/movss/movsd
9437 // depending on what the source datatype is.
9440 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9442 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9443 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9444 assert((VT.is128BitVector() || VT.is256BitVector() ||
9445 VT.is512BitVector()) &&
9446 "Expected an SSE value type!");
9447 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9448 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9449 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9452 // We can't directly insert an i8 or i16 into a vector, so zero extend it to i32 first.
9454 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9455 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9456 if (VT.getSizeInBits() >= 256) {
9457 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9458 if (Subtarget.hasAVX()) {
9459 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9460 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9462 // Without AVX, we need to extend to a 128-bit vector and then
9463 // insert into the 256-bit vector.
9464 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
9465 SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
9466 Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
9469 assert(VT.is128BitVector() && "Expected an SSE value type!");
9470 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
9471 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9473 return DAG.getBitcast(VT, Item);
9477 // Is it a vector logical left shift?
9478 if (NumElems == 2 && Idx == 1 &&
9479 X86::isZeroNode(Op.getOperand(0)) &&
9480 !X86::isZeroNode(Op.getOperand(1))) {
9481 unsigned NumBits = VT.getSizeInBits();
9482 return getVShift(true, VT,
9483 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
9484 VT, Op.getOperand(1)),
9485 NumBits/2, DAG, *this, dl);
9488 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
9491 // Otherwise, if this is a vector with i32 or f32 elements, and the element
9492 // is a non-constant being inserted into an element other than the low one,
9493 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
9494 // movd/movss) to move this into the low element, then shuffle it into place.
9496 if (EVTBits == 32) {
9497 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9498 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
9502 // Splat is obviously ok. Let legalizer expand it to a shuffle.
9503 if (Values.size() == 1) {
9504 if (EVTBits == 32) {
9505 // Instead of a shuffle like this:
9506 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
9507 // Check if it's possible to issue this instead.
9508 // shuffle (vload ptr), undef, <1, 1, 1, 1>
9509 unsigned Idx = countTrailingZeros(NonZeros);
9510 SDValue Item = Op.getOperand(Idx);
9511 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
9512 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
9517 // A vector full of immediates; various special cases are already
9518 // handled, so this is best done with a single constant-pool load.
9522 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
9525 // See if we can use a vector load to get all of the elements.
9527 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
9529 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
9533 // If this is a splat of pairs of 32-bit elements, we can use a narrower
9534 // build_vector and broadcast it.
9535 // TODO: We could probably generalize this more.
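// E.g. a v8i32 <a, b, a, b, a, b, a, b> becomes a v4i32 build_vector
// <a, b, undef, undef>, bitcast to v2i64, broadcast to v4i64 with VBROADCAST,
// and bitcast back to v8i32.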
9536 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
9537 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9538 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9539 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
9540 // Make sure all the even/odd operands match.
9541 for (unsigned i = 2; i != NumElems; ++i)
9542 if (Ops[i % 2] != Op.getOperand(i))
9546 if (CanSplat(Op, NumElems, Ops)) {
9547 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
9548 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
9549 // Create a new build vector and cast to v2i64/v2f64.
9550 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
9551 DAG.getBuildVector(NarrowVT, dl, Ops));
9552 // Broadcast from v2i64/v2f64 and cast to final VT.
9553 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
9554 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
9559 // For AVX-length vectors, build the individual 128-bit pieces and use
9560 // shuffles to put them in place.
9561 if (VT.getSizeInBits() > 128) {
9562 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
9564 // Build both the lower and upper subvector.
9566 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
9567 SDValue Upper = DAG.getBuildVector(
9568 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
9570 // Recreate the wider vector with the lower and upper part.
9571 return concatSubVectors(Lower, Upper, VT, NumElems, DAG, dl,
9572 VT.getSizeInBits() / 2);
9575 // Let legalizer expand 2-wide build_vectors.
9576 if (EVTBits == 64) {
9577 if (NumNonZero == 1) {
9578 // One half is zero or undef.
9579 unsigned Idx = countTrailingZeros(NonZeros);
9580 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
9581 Op.getOperand(Idx));
9582 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
9587 // If element VT is < 32 bits, convert it to inserts into a zero vector.
9588 if (EVTBits == 8 && NumElems == 16)
9589 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
9593 if (EVTBits == 16 && NumElems == 8)
9594 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
9598 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
9599 if (EVTBits == 32 && NumElems == 4)
9600 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
9603 // If element VT is == 32 bits, turn it into a number of shuffles.
9604 if (NumElems == 4 && NumZero > 0) {
9605 SmallVector<SDValue, 8> Ops(NumElems);
9606 for (unsigned i = 0; i < 4; ++i) {
9607 bool isZero = !(NonZeros & (1ULL << i));
9609 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
9611 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9614 for (unsigned i = 0; i < 2; ++i) {
9615 switch ((NonZeros >> (i*2)) & 0x3) {
9616 default: llvm_unreachable("Unexpected NonZero count");
9618 Ops[i] = Ops[i*2]; // Must be a zero vector.
9621 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
9624 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9627 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9632 bool Reverse1 = (NonZeros & 0x3) == 2;
9633 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
9637 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9638 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
9640 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9643 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9645 // Check for a build vector from mostly shuffle plus few inserting.
9646 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9649 // For SSE 4.1, use insertps to put the high elements into the low element.
9650 if (Subtarget.hasSSE41()) {
9652 if (!Op.getOperand(0).isUndef())
9653 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9655 Result = DAG.getUNDEF(VT);
9657 for (unsigned i = 1; i < NumElems; ++i) {
9658 if (Op.getOperand(i).isUndef()) continue;
9659 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9660 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9665 // Otherwise, expand into a number of unpckl*, start by extending each of
9666 // our (non-undef) elements to the full vector width with the element in the
9667 // bottom slot of the vector (which generates no code for SSE).
9668 SmallVector<SDValue, 8> Ops(NumElems);
9669 for (unsigned i = 0; i < NumElems; ++i) {
9670 if (!Op.getOperand(i).isUndef())
9671 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9673 Ops[i] = DAG.getUNDEF(VT);
9676 // Next, we iteratively mix elements, e.g. for v4f32:
9677 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9678 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9679 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
9680 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9681 // Generate scaled UNPCKL shuffle mask.
9682 SmallVector<int, 16> Mask;
9683 for(unsigned i = 0; i != Scale; ++i)
9685 for (unsigned i = 0; i != Scale; ++i)
9686 Mask.push_back(NumElems+i);
9687 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9689 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9690 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9695 // 256-bit AVX can use the vinsertf128 instruction
9696 // to create 256-bit vectors from two other 128-bit ones.
9697 // TODO: Detect subvector broadcast here instead of DAG combine?
9698 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9699 const X86Subtarget &Subtarget) {
9701 MVT ResVT = Op.getSimpleValueType();
9703 assert((ResVT.is256BitVector() ||
9704 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9706 unsigned NumOperands = Op.getNumOperands();
9707 unsigned NumZero = 0;
9708 unsigned NumNonZero = 0;
9709 unsigned NonZeros = 0;
9710 for (unsigned i = 0; i != NumOperands; ++i) {
9711 SDValue SubVec = Op.getOperand(i);
9712 if (SubVec.isUndef())
9714 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9717 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9723 // If we have more than 2 non-zeros, build each half separately.
9724 if (NumNonZero > 2) {
9725 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
9726 ResVT.getVectorNumElements()/2);
9727 ArrayRef<SDUse> Ops = Op->ops();
9728 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9729 Ops.slice(0, NumOperands/2));
9730 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9731 Ops.slice(NumOperands/2));
9732 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9735 // Otherwise, build it up through insert_subvectors.
9736 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9737 : DAG.getUNDEF(ResVT);
9739 MVT SubVT = Op.getOperand(0).getSimpleValueType();
9740 unsigned NumSubElems = SubVT.getVectorNumElements();
9741 for (unsigned i = 0; i != NumOperands; ++i) {
9742 if ((NonZeros & (1 << i)) == 0)
9745 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9747 DAG.getIntPtrConstant(i * NumSubElems, dl));
9753 // Returns true if the given node is a type promotion (by concatenating i1
9754 // zeros) of the result of a node that already zeros all upper bits of the k-register.
9756 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
9757 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
9758 const X86Subtarget &Subtarget,
9759 SelectionDAG & DAG) {
9761 MVT ResVT = Op.getSimpleValueType();
9762 unsigned NumOperands = Op.getNumOperands();
9764 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
9765 "Unexpected number of operands in CONCAT_VECTORS");
9767 unsigned NumZero = 0;
9768 unsigned NumNonZero = 0;
9769 uint64_t NonZeros = 0;
9770 for (unsigned i = 0; i != NumOperands; ++i) {
9771 SDValue SubVec = Op.getOperand(i);
9772 if (SubVec.isUndef())
9774 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9777 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9778 NonZeros |= (uint64_t)1 << i;
9784 // If there are zero or one non-zeros we can handle this very simply.
9785 if (NumNonZero <= 1) {
9786 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9787 : DAG.getUNDEF(ResVT);
9790 unsigned Idx = countTrailingZeros(NonZeros);
9791 SDValue SubVec = Op.getOperand(Idx);
9792 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9793 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
9794 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
9797 if (NumOperands > 2) {
9798 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
9799 ResVT.getVectorNumElements()/2);
9800 ArrayRef<SDUse> Ops = Op->ops();
9801 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9802 Ops.slice(0, NumOperands/2));
9803 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9804 Ops.slice(NumOperands/2));
9805 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9808 assert(NumNonZero == 2 && "Simple cases not handled?");
9810 if (ResVT.getVectorNumElements() >= 16)
9811 return Op; // The operation is legal with KUNPCK
9813 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
9814 DAG.getUNDEF(ResVT), Op.getOperand(0),
9815 DAG.getIntPtrConstant(0, dl));
9816 unsigned NumElems = ResVT.getVectorNumElements();
9817 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
9818 DAG.getIntPtrConstant(NumElems/2, dl));
9821 static SDValue LowerCONCAT_VECTORS(SDValue Op,
9822 const X86Subtarget &Subtarget,
9823 SelectionDAG &DAG) {
9824 MVT VT = Op.getSimpleValueType();
9825 if (VT.getVectorElementType() == MVT::i1)
9826 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
9828 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
9829 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
9830 Op.getNumOperands() == 4)));
9832 // AVX can use the vinsertf128 instruction to create 256-bit vectors
9833 // from two other 128-bit ones.
9835 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
9836 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
9839 //===----------------------------------------------------------------------===//
9840 // Vector shuffle lowering
9842 // This is an experimental code path for lowering vector shuffles on x86. It is
9843 // designed to handle arbitrary vector shuffles and blends, gracefully
9844 // degrading performance as necessary. It works hard to recognize idiomatic
9845 // shuffles and lower them to optimal instruction patterns without leaving
9846 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
9848 //===----------------------------------------------------------------------===//
9850 /// Tiny helper function to identify a no-op mask.
9852 /// This is a somewhat boring predicate function. It checks whether the mask
9853 /// array input, which is assumed to be a single-input shuffle mask of the kind
9854 /// used by the X86 shuffle instructions (not a fully general
9855 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
9856 /// in-place shuffle are 'no-op's.
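/// For example, <0, 1, -1, 3> is a no-op mask, while <1, 0, 2, 3> is not.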
9857 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
9858 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9859 assert(Mask[i] >= -1 && "Out of bound mask element!");
9860 if (Mask[i] >= 0 && Mask[i] != i)
9866 /// Test whether there are elements crossing 128-bit lanes in this shuffle mask.
9869 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
9870 /// and we routinely test for these.
9871 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
9872 int LaneSize = 128 / VT.getScalarSizeInBits();
9873 int Size = Mask.size();
9874 for (int i = 0; i < Size; ++i)
9875 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
9880 /// Test whether a shuffle mask is equivalent within each sub-lane.
9882 /// This checks a shuffle mask to see if it is performing the same
9883 /// lane-relative shuffle in each sub-lane. This trivially implies
9884 /// that it is also not lane-crossing. It may however involve a blend from the
9885 /// same lane of a second vector.
9887 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
9888 /// non-trivial to compute in the face of undef lanes. The representation is
9889 /// suitable for use with existing 128-bit shuffles as entries from the second
9890 /// vector have been remapped to [LaneSize, 2*LaneSize).
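/// For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats per 128-bit
/// lane and yields the RepeatedMask <0, 5, 2, 7> (second-vector entries
/// remapped into [4, 8)).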
9891 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
9893 SmallVectorImpl<int> &RepeatedMask) {
9894 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9895 RepeatedMask.assign(LaneSize, -1);
9896 int Size = Mask.size();
9897 for (int i = 0; i < Size; ++i) {
9898 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
9901 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9902 // This entry crosses lanes, so there is no way to model this shuffle.
9905 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9906 // Adjust second vector indices to start at LaneSize instead of Size.
9907 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
9908 : Mask[i] % LaneSize + LaneSize;
9909 if (RepeatedMask[i % LaneSize] < 0)
9910 // This is the first non-undef entry in this slot of a 128-bit lane.
9911 RepeatedMask[i % LaneSize] = LocalM;
9912 else if (RepeatedMask[i % LaneSize] != LocalM)
9913 // Found a mismatch with the repeated mask.
9919 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
9921 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9922 SmallVectorImpl<int> &RepeatedMask) {
9923 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9927 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
9928 SmallVector<int, 32> RepeatedMask;
9929 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9932 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
9934 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9935 SmallVectorImpl<int> &RepeatedMask) {
9936 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
9939 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9940 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9941 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
9943 SmallVectorImpl<int> &RepeatedMask) {
9944 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9945 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
9946 int Size = Mask.size();
9947 for (int i = 0; i < Size; ++i) {
9948 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
9949 if (Mask[i] == SM_SentinelUndef)
9951 if (Mask[i] == SM_SentinelZero) {
9952 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
9954 RepeatedMask[i % LaneSize] = SM_SentinelZero;
9957 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9958 // This entry crosses lanes, so there is no way to model this shuffle.
9961 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9962 // Adjust second vector indices to start at LaneSize instead of Size.
9964 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
9965 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
9966 // This is the first non-undef entry in this slot of a 128-bit lane.
9967 RepeatedMask[i % LaneSize] = LocalM;
9968 else if (RepeatedMask[i % LaneSize] != LocalM)
9969 // Found a mismatch with the repeated mask.
9975 /// Checks whether a shuffle mask is equivalent to an explicit list of arguments.
9978 /// This is a fast way to test a shuffle mask against a fixed pattern:
9980 /// if (isShuffleEquivalent(Mask, 3, 2, {1, 0})) { ... }
9982 /// It returns true if the mask is exactly as wide as the argument list, and
9983 /// each element of the mask is either -1 (signifying undef) or the value given
9984 /// in the argument.
9985 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
9986 ArrayRef<int> ExpectedMask) {
9987 if (Mask.size() != ExpectedMask.size())
9990 int Size = Mask.size();
9992 // If the values are build vectors, we can look through them to find
9993 // equivalent inputs that make the shuffles equivalent.
9994 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
9995 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
9997 for (int i = 0; i < Size; ++i) {
9998 assert(Mask[i] >= -1 && "Out of bound mask element!");
9999 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10000 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10001 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10002 if (!MaskBV || !ExpectedBV ||
10003 MaskBV->getOperand(Mask[i] % Size) !=
10004 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10012 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10014 /// The masks must be exactly the same width.
10016 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10017 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
10019 /// SM_SentinelZero is accepted as a valid negative index but must match in both.
10020 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10021 ArrayRef<int> ExpectedMask) {
10022 int Size = Mask.size();
10023 if (Size != (int)ExpectedMask.size())
10025 assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10026 "Illegal target shuffle mask");
10028 for (int i = 0; i < Size; ++i)
10029 if (Mask[i] == SM_SentinelUndef)
10031 else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
10033 else if (Mask[i] != ExpectedMask[i])
10039 // Merges a general DAG shuffle mask and zeroable bit mask into a target shuffle mask.
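// E.g. Mask = <0, 5, 2, 7> with Zeroable bit 1 set produces the target mask
// <0, SM_SentinelZero, 2, 7>.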
10041 static SmallVector<int, 64> createTargetShuffleMask(ArrayRef<int> Mask,
10042 const APInt &Zeroable) {
10043 int NumElts = Mask.size();
10044 assert(NumElts == (int)Zeroable.getBitWidth() && "Mismatch mask sizes");
10046 SmallVector<int, 64> TargetMask(NumElts, SM_SentinelUndef);
10047 for (int i = 0; i != NumElts; ++i) {
10049 if (M == SM_SentinelUndef)
10051 assert(0 <= M && M < (2 * NumElts) && "Out of range shuffle index");
10052 TargetMask[i] = (Zeroable[i] ? SM_SentinelZero : M);
10057 // Attempt to create a shuffle mask from a VSELECT condition mask.
10058 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10060 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10063 unsigned Size = Cond.getValueType().getVectorNumElements();
10064 Mask.resize(Size, SM_SentinelUndef);
10066 for (int i = 0; i != (int)Size; ++i) {
10067 SDValue CondElt = Cond.getOperand(i);
10069 // Arbitrarily choose from the 2nd operand if the select condition element is undef.
10071 // TODO: Can we do better by matching patterns such as even/odd?
10072 if (CondElt.isUndef() || isNullConstant(CondElt))
10079 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd instructions.
10081 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10082 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10085 SmallVector<int, 8> Unpcklwd;
10086 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10087 /* Unary = */ false);
10088 SmallVector<int, 8> Unpckhwd;
10089 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10090 /* Unary = */ false);
10091 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10092 isTargetShuffleEquivalent(Mask, Unpckhwd));
10093 return IsUnpackwdMask;
10096 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10097 // Create 128-bit vector type based on mask size.
10098 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10099 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10101 // We can't assume a canonical shuffle mask, so try the commuted version too.
10102 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10103 ShuffleVectorSDNode::commuteMask(CommutedMask);
10105 // Match any of unary/binary or low/high.
10106 for (unsigned i = 0; i != 4; ++i) {
10107 SmallVector<int, 16> UnpackMask;
10108 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10109 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10110 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10116 /// Return true if a shuffle mask chooses elements identically in its top and
10117 /// bottom halves. For example, any splat mask has the same top and bottom
10118 /// halves. If an element is undefined in only one half of the mask, the halves
10119 /// are not considered identical.
10120 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10121 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10122 unsigned HalfSize = Mask.size() / 2;
10123 for (unsigned i = 0; i != HalfSize; ++i) {
10124 if (Mask[i] != Mask[i + HalfSize])
10130 /// Get a 4-lane 8-bit shuffle immediate for a mask.
10132 /// This helper function produces an 8-bit shuffle immediate corresponding to
10133 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
10134 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example.
10137 /// NB: We rely heavily on "undef" masks preserving the input lane.
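/// For example, the mask <3, 1, 2, 0> encodes as
/// (3 << 0) | (1 << 2) | (2 << 4) | (0 << 6) = 0x27.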
10138 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10139 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10140 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10141 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10142 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10143 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10146 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10147 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10148 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10149 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10153 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10154 SelectionDAG &DAG) {
10155 return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10158 /// Compute whether each element of a shuffle is zeroable.
10160 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
10161 /// Either it is an undef element in the shuffle mask, the element of the input
10162 /// referenced is undef, or the element of the input referenced is known to be
10163 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
10164 /// as many lanes with this technique as possible to simplify the remaining shuffle.
10166 static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
10167 SDValue V1, SDValue V2) {
10168 APInt Zeroable(Mask.size(), 0);
10169 V1 = peekThroughBitcasts(V1);
10170 V2 = peekThroughBitcasts(V2);
10172 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
10173 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
10175 int VectorSizeInBits = V1.getValueSizeInBits();
10176 int ScalarSizeInBits = VectorSizeInBits / Mask.size();
10177 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
10179 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10181 // Handle the easy cases.
10182 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
10183 Zeroable.setBit(i);
10187 // Determine shuffle input and normalize the mask.
10188 SDValue V = M < Size ? V1 : V2;
10191 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
10192 if (V.getOpcode() != ISD::BUILD_VECTOR)
10195 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
10196 // the (larger) source element must be UNDEF/ZERO.
10197 if ((Size % V.getNumOperands()) == 0) {
10198 int Scale = Size / V->getNumOperands();
10199 SDValue Op = V.getOperand(M / Scale);
10200 if (Op.isUndef() || X86::isZeroNode(Op))
10201 Zeroable.setBit(i);
10202 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
10203 APInt Val = Cst->getAPIntValue();
10204 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10205 Val = Val.getLoBits(ScalarSizeInBits);
10207 Zeroable.setBit(i);
10208 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
10209 APInt Val = Cst->getValueAPF().bitcastToAPInt();
10210 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10211 Val = Val.getLoBits(ScalarSizeInBits);
10213 Zeroable.setBit(i);
10218 // If the BUILD_VECTOR has more elements, then all the (smaller) source
10219 // elements must be UNDEF or ZERO.
10220 if ((V.getNumOperands() % Size) == 0) {
10221 int Scale = V->getNumOperands() / Size;
10222 bool AllZeroable = true;
10223 for (int j = 0; j < Scale; ++j) {
10224 SDValue Op = V.getOperand((M * Scale) + j);
10225 AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
10228 Zeroable.setBit(i);
10236 // The shuffle result is as follows:
10237 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in ascending order.
10238 // Each element of Zeroable corresponds to a particular element of Mask, as
10239 // described in the computeZeroableShuffleElements function.
10241 // The function looks for a sub-mask whose non-zero elements are in
10242 // increasing order. If such a sub-mask exists, the function returns true.
10243 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10244 ArrayRef<int> Mask, const EVT &VectorType,
10245 bool &IsZeroSideLeft) {
10246 int NextElement = -1;
10247 // Check if the Mask's nonzero elements are in increasing order.
10248 for (int i = 0, e = Mask.size(); i < e; i++) {
10249 // Check that the mask's zero elements are built from only zeros.
10250 assert(Mask[i] >= -1 && "Out of bound mask element!");
10255 // Find the lowest non zero element
10256 if (NextElement < 0) {
10257 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10258 IsZeroSideLeft = NextElement != 0;
10260 // Exit if the mask's non zero elements are not in increasing order.
10261 if (NextElement != Mask[i])
10268 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
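/// Each (NumEltBytes-wide) shuffle mask element M expands to the PSHUFB
/// control bytes M*NumEltBytes .. M*NumEltBytes+NumEltBytes-1; zeroable
/// elements use 0x80 (sign bit set) so PSHUFB writes zero, and any
/// lane-crossing index gives up, since PSHUFB cannot cross 128-bit lanes.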
10269 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10270 ArrayRef<int> Mask, SDValue V1,
10271 SDValue V2, const APInt &Zeroable,
10272 const X86Subtarget &Subtarget,
10273 SelectionDAG &DAG) {
10274 int Size = Mask.size();
10275 int LaneSize = 128 / VT.getScalarSizeInBits();
10276 const int NumBytes = VT.getSizeInBits() / 8;
10277 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10279 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10280 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10281 (Subtarget.hasBWI() && VT.is512BitVector()));
10283 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10284 // Sign bit set in i8 mask means zero element.
10285 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10288 for (int i = 0; i < NumBytes; ++i) {
10289 int M = Mask[i / NumEltBytes];
10291 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10294 if (Zeroable[i / NumEltBytes]) {
10295 PSHUFBMask[i] = ZeroMask;
10299 // We can only use a single input of V1 or V2.
10300 SDValue SrcV = (M >= Size ? V2 : V1);
10301 if (V && V != SrcV)
10306 // PSHUFB can't cross lanes, ensure this doesn't happen.
10307 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10311 M = M * NumEltBytes + (i % NumEltBytes);
10312 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10314 assert(V && "Failed to find a source input");
10316 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10317 return DAG.getBitcast(
10318 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10319 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
10322 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10323 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10326 // X86 has dedicated shuffle patterns that can be lowered to VEXPAND.
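// E.g. a v4f64 shuffle producing <zero, V1[0], zero, V1[1]> (Zeroable bits
// {0, 2}) becomes X86ISD::EXPAND of V1 with write-mask ~Zeroable = 0b1010 and
// a zero vector as the pass-through.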
10327 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10328 const APInt &Zeroable,
10329 ArrayRef<int> Mask, SDValue &V1,
10330 SDValue &V2, SelectionDAG &DAG,
10331 const X86Subtarget &Subtarget) {
10332 bool IsLeftZeroSide = true;
10333 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10336 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10338 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10339 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10340 unsigned NumElts = VT.getVectorNumElements();
10341 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10342 "Unexpected number of vector elements");
10343 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10344 Subtarget, DAG, DL);
10345 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10346 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10347 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10350 static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10351 unsigned &UnpackOpcode, bool IsUnary,
10352 ArrayRef<int> TargetMask,
10353 const SDLoc &DL, SelectionDAG &DAG,
10354 const X86Subtarget &Subtarget) {
10355 int NumElts = VT.getVectorNumElements();
10357 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10358 for (int i = 0; i != NumElts; i += 2) {
10359 int M1 = TargetMask[i + 0];
10360 int M2 = TargetMask[i + 1];
10361 Undef1 &= (SM_SentinelUndef == M1);
10362 Undef2 &= (SM_SentinelUndef == M2);
10363 Zero1 &= isUndefOrZero(M1);
10364 Zero2 &= isUndefOrZero(M2);
10366 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10367 "Zeroable shuffle detected");
10369 // Attempt to match the target mask against the unpack lo/hi mask patterns.
10370 SmallVector<int, 64> Unpckl, Unpckh;
10371 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10372 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10373 UnpackOpcode = X86ISD::UNPCKL;
10374 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10375 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10379 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10380 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10381 UnpackOpcode = X86ISD::UNPCKH;
10382 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10383 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10387 // If an unary shuffle, attempt to match as an unpack lo/hi with zero.
10388 if (IsUnary && (Zero1 || Zero2)) {
10389 // Don't bother if we can blend instead.
10390 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10391 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10394 bool MatchLo = true, MatchHi = true;
10395 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10396 int M = TargetMask[i];
10398 // Ignore if the input is known to be zero or the index is undef.
10399 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10400 (M == SM_SentinelUndef))
10403 MatchLo &= (M == Unpckl[i]);
10404 MatchHi &= (M == Unpckh[i]);
10407 if (MatchLo || MatchHi) {
10408 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10409 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10410 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10415 // If a binary shuffle, commute and try again.
10417 ShuffleVectorSDNode::commuteMask(Unpckl);
10418 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10419 UnpackOpcode = X86ISD::UNPCKL;
10424 ShuffleVectorSDNode::commuteMask(Unpckh);
10425 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10426 UnpackOpcode = X86ISD::UNPCKH;
10435 // X86 has dedicated unpack instructions that can handle specific blend
10436 // operations: UNPCKH and UNPCKL.
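// For v4i32, UNPCKL interleaves the low halves of the two inputs
// (mask <0, 4, 1, 5>) and UNPCKH the high halves (mask <2, 6, 3, 7>).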
10437 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10438 ArrayRef<int> Mask, SDValue V1, SDValue V2,
10439 SelectionDAG &DAG) {
10440 SmallVector<int, 8> Unpckl;
10441 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10442 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10443 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10445 SmallVector<int, 8> Unpckh;
10446 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10447 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10448 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10450 // Commute and try again.
10451 ShuffleVectorSDNode::commuteMask(Unpckl);
10452 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10453 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10455 ShuffleVectorSDNode::commuteMask(Unpckh);
10456 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10457 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10462 static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10464 int Size = (int)Mask.size();
10465 int Split = Size / Delta;
10466 int TruncatedVectorStart = SwappedOps ? Size : 0;
10468 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10469 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10472 // The rest of the mask should not refer to the truncated vector's elements.
10473 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10474 TruncatedVectorStart + Size))
10480 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10482 // An example is the following:
10484 // t0: ch = EntryToken
10485 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10486 // t25: v4i32 = truncate t2
10487 // t41: v8i16 = bitcast t25
10488 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10489 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10490 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10491 // t18: v2i64 = bitcast t51
10493 // Without avx512vl, this is lowered to:
10495 // vpmovqd %zmm0, %ymm0
10496 // vpshufb {{.*#+}} xmm0 =
10497 // xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10499 // But when avx512vl is available, one can just use a single vpmovdw instruction.
10501 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10502 MVT VT, SDValue V1, SDValue V2,
10504 const X86Subtarget &Subtarget) {
10505 if (VT != MVT::v16i8 && VT != MVT::v8i16)
10508 if (Mask.size() != VT.getVectorNumElements())
10511 bool SwappedOps = false;
10513 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10514 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10523 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10524 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10526 // and similar ones.
10527 if (V1.getOpcode() != ISD::BITCAST)
10529 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
10532 SDValue Src = V1.getOperand(0).getOperand(0);
10533 MVT SrcVT = Src.getSimpleValueType();
10535 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
10536 // are only available with avx512vl.
10537 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
10540 // Down Convert Word to Byte is only available with avx512bw. The case with
10541 // 256-bit output doesn't contain a shuffle and is therefore not handled here.
10542 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
10543 !Subtarget.hasBWI())
10546 // The first half/quarter of the mask should refer to every second/fourth
10547 // element of the vector truncated and bitcasted.
10548 if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
10549 !matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
10552 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
10555 // X86 has dedicated pack instructions that can handle specific truncation
10556 // operations: PACKSS and PACKUS.
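// E.g. two v4i32 inputs can be packed into one v8i16 result with PACKUSDW when
// the upper 16 bits of every element are known zero, or with PACKSSDW when
// every element has more than 16 sign bits (i.e. is already sign-extended), so
// the saturating pack behaves exactly like a truncation.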
10557 static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
10558 SDValue &V2, unsigned &PackOpcode,
10559 ArrayRef<int> TargetMask,
10561 const X86Subtarget &Subtarget) {
10562 unsigned NumElts = VT.getVectorNumElements();
10563 unsigned BitSize = VT.getScalarSizeInBits();
10564 MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
10565 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
10567 auto MatchPACK = [&](SDValue N1, SDValue N2) {
10568 SDValue VV1 = DAG.getBitcast(PackVT, N1);
10569 SDValue VV2 = DAG.getBitcast(PackVT, N2);
10570 if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
10571 APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
10572 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
10573 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
10577 PackOpcode = X86ISD::PACKUS;
10581 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
10582 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
10586 PackOpcode = X86ISD::PACKSS;
10592 // Try binary shuffle.
10593 SmallVector<int, 32> BinaryMask;
10594 createPackShuffleMask(VT, BinaryMask, false);
10595 if (isTargetShuffleEquivalent(TargetMask, BinaryMask))
10596 if (MatchPACK(V1, V2))
10599 // Try unary shuffle.
10600 SmallVector<int, 32> UnaryMask;
10601 createPackShuffleMask(VT, UnaryMask, true);
10602 if (isTargetShuffleEquivalent(TargetMask, UnaryMask))
10603 if (MatchPACK(V1, V1))
10609 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10610 SDValue V1, SDValue V2, SelectionDAG &DAG,
10611 const X86Subtarget &Subtarget) {
10613 unsigned PackOpcode;
10614 if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10616 return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
10617 DAG.getBitcast(PackVT, V2));
10622 /// Try to emit a bitmask instruction for a shuffle.
10624 /// This handles cases where we can model a blend exactly as a bitmask due to
10625 /// one of the inputs being zeroable.
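/// E.g. a v4i32 shuffle that keeps elements 0 and 2 of V1 and zeroes
/// elements 1 and 3 is just V1 & <-1, 0, -1, 0>.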
10626 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10627 SDValue V2, ArrayRef<int> Mask,
10628 const APInt &Zeroable,
10629 const X86Subtarget &Subtarget,
10630 SelectionDAG &DAG) {
10632 MVT EltVT = VT.getVectorElementType();
10633 SDValue Zero, AllOnes;
10634 // Use f64 if i64 isn't legal.
10635 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10637 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10641 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10642 Zero = DAG.getConstantFP(0.0, DL, EltVT);
10643 AllOnes = DAG.getConstantFP(
10644 APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
10646 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10648 Zero = DAG.getConstant(0, DL, EltVT);
10649 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10652 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10654 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10657 if (Mask[i] % Size != i)
10658 return SDValue(); // Not a blend.
10660 V = Mask[i] < Size ? V1 : V2;
10661 else if (V != (Mask[i] < Size ? V1 : V2))
10662 return SDValue(); // Can only let one input through the mask.
10664 VMaskOps[i] = AllOnes;
10667 return SDValue(); // No non-zeroable elements!
10669 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10670 VMask = DAG.getBitcast(LogicVT, VMask);
10671 V = DAG.getBitcast(LogicVT, V);
10672 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10673 return DAG.getBitcast(VT, And);
10676 /// Try to emit a blend instruction for a shuffle using bit math.
10678 /// This is used as a fallback approach when first class blend instructions are
10679 /// unavailable. Currently it is only suitable for integer vectors, but could
10680 /// be generalized for floating point vectors if desirable.
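/// E.g. for the v4i32 mask <0, 5, 2, 7> this emits
/// (V1 & <-1, 0, -1, 0>) | (V2 & ~<-1, 0, -1, 0>).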
10681 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10682 SDValue V2, ArrayRef<int> Mask,
10683 SelectionDAG &DAG) {
10684 assert(VT.isInteger() && "Only supports integer vector types!");
10685 MVT EltVT = VT.getVectorElementType();
10686 SDValue Zero = DAG.getConstant(0, DL, EltVT);
10687 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10688 SmallVector<SDValue, 16> MaskOps;
10689 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10690 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10691 return SDValue(); // Shuffled input!
10692 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10695 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10696 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
10697 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
10698 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
10701 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10702 SDValue PreservedSrc,
10703 const X86Subtarget &Subtarget,
10704 SelectionDAG &DAG);
10706 static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
10707 MutableArrayRef<int> TargetMask,
10708 bool &ForceV1Zero, bool &ForceV2Zero,
10709 uint64_t &BlendMask) {
10710 bool V1IsZeroOrUndef =
10711 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10712 bool V2IsZeroOrUndef =
10713 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10716 ForceV1Zero = false, ForceV2Zero = false;
10717 assert(TargetMask.size() <= 64 && "Shuffle mask too big for blend mask");
10719 // Attempt to generate the binary blend mask. If an input is zero then
10720 // we can use any lane.
10721 // TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
10722 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
10723 int M = TargetMask[i];
10724 if (M == SM_SentinelUndef)
10728 if (M == i + Size) {
10729 BlendMask |= 1ull << i;
10732 if (M == SM_SentinelZero) {
10733 if (V1IsZeroOrUndef) {
10734 ForceV1Zero = true;
10738 if (V2IsZeroOrUndef) {
10739 ForceV2Zero = true;
10740 BlendMask |= 1ull << i;
10741 TargetMask[i] = i + Size;
10750 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
10752 uint64_t ScaledMask = 0;
10753 for (int i = 0; i != Size; ++i)
10754 if (BlendMask & (1ull << i))
10755 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
10759 /// Try to emit a blend instruction for a shuffle.
10761 /// This doesn't do any checks for the availability of instructions for blending
10762 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
10763 /// be matched in the backend with the type given. What it does check for is
10764 /// that the shuffle mask is a blend, or convertible into a blend with zero.
10765 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
10766 SDValue V2, ArrayRef<int> Original,
10767 const APInt &Zeroable,
10768 const X86Subtarget &Subtarget,
10769 SelectionDAG &DAG) {
10770 SmallVector<int, 64> Mask = createTargetShuffleMask(Original, Zeroable);
10772 uint64_t BlendMask = 0;
10773 bool ForceV1Zero = false, ForceV2Zero = false;
10774 if (!matchVectorShuffleAsBlend(V1, V2, Mask, ForceV1Zero, ForceV2Zero,
10778 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
10780 V1 = getZeroVector(VT, Subtarget, DAG, DL);
10782 V2 = getZeroVector(VT, Subtarget, DAG, DL);
10784 switch (VT.SimpleTy) {
10787 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
10791 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
10798 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
10799 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
10800 DAG.getConstant(BlendMask, DL, MVT::i8));
10801 case MVT::v16i16: {
10802 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
10803 SmallVector<int, 8> RepeatedMask;
10804 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
10805 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
10806 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
10808 for (int i = 0; i < 8; ++i)
10809 if (RepeatedMask[i] >= 8)
10810 BlendMask |= 1ull << i;
10811 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10812 DAG.getConstant(BlendMask, DL, MVT::i8));
10814 // Use PBLENDW for lower/upper lanes and then blend lanes.
10815 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
10816 // merge to VSELECT where useful.
10817 uint64_t LoMask = BlendMask & 0xFF;
10818 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
10819 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
10820 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10821 DAG.getConstant(LoMask, DL, MVT::i8));
10822 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10823 DAG.getConstant(HiMask, DL, MVT::i8));
10824 return DAG.getVectorShuffle(
10825 MVT::v16i16, DL, Lo, Hi,
10826 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
10831 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
10834 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
10836 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
10837 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10841 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
10843 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10844 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10845 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10848 // Scale the blend by the number of bytes per element.
10849 int Scale = VT.getScalarSizeInBits() / 8;
10851 // This form of blend is always done on bytes. Compute the byte vector type.
10853 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10855 // x86 allows load folding with blendvb from the 2nd source operand. But
10856 // we are still using LLVM select here (see comment below), so that's V1.
10857 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
10858 // allow that load-folding possibility.
10859 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
10860 ShuffleVectorSDNode::commuteMask(Mask);
10864 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
10865 // mix of LLVM's code generator and the x86 backend. We tell the code
10866 // generator that boolean values in the elements of an x86 vector register
10867 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
10868 // mapping a select to operand #1, and 'false' mapping to operand #2. The
10869 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
10870 // of the element (the remaining are ignored) and 0 in that high bit would
10871 // mean operand #1 while 1 in the high bit would mean operand #2. So while
10872 // the LLVM model for boolean values in vector elements gets the relevant
10873 // bit set, it is set backwards and over constrained relative to x86's
10875 SmallVector<SDValue, 32> VSELECTMask;
10876 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10877 for (int j = 0; j < Scale; ++j)
10878 VSELECTMask.push_back(
10879 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
10880 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
10883 V1 = DAG.getBitcast(BlendVT, V1);
10884 V2 = DAG.getBitcast(BlendVT, V2);
10885 return DAG.getBitcast(
10887 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
10896 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
10897 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
10899 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10904 // Otherwise load an immediate into a GPR, cast to k-register, and use a
10907 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10908 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10909 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10912 llvm_unreachable("Not a supported integer vector type!");
10916 /// Try to lower as a blend of elements from two inputs followed by
10917 /// a single-input permutation.
10919 /// This matches the pattern where we can blend elements from two inputs and
10920 /// then reduce the shuffle to a single-input permutation.
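/// For example, the v4i32 mask <1, 4, 3, 6> is lowered as the per-element
/// blend <4, 1, 6, 3> of V1/V2 followed by the single-input permute
/// <1, 0, 3, 2>.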
10921 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
10922 SDValue V1, SDValue V2,
10923 ArrayRef<int> Mask,
10925 bool ImmBlends = false) {
10926 // We build up the blend mask while checking whether a blend is a viable way
10927 // to reduce the shuffle.
10928 SmallVector<int, 32> BlendMask(Mask.size(), -1);
10929 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
10931 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10935 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
10937 if (BlendMask[Mask[i] % Size] < 0)
10938 BlendMask[Mask[i] % Size] = Mask[i];
10939 else if (BlendMask[Mask[i] % Size] != Mask[i])
10940 return SDValue(); // Can't blend in the needed input!
10942 PermuteMask[i] = Mask[i] % Size;
// If only immediate blends are allowed, bail if the blend mask can't be widened to an i16 blend.
10947 unsigned EltSize = VT.getScalarSizeInBits();
10948 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
10951 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
10952 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
10955 /// Try to lower as an unpack of elements from two inputs followed by
10956 /// a single-input permutation.
10958 /// This matches the pattern where we can unpack elements from two inputs and
10959 /// then reduce the shuffle to a single-input (wider) permutation.
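///
/// For example (illustrative values only), the v8i16 mask
/// <0, 8, 1, 9, 3, 11, 2, 10> can be lowered as UNPCKL(V1, V2) followed by the
/// single-input permute <0, 1, 2, 3, 6, 7, 4, 5>.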
10960 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
10961 SDValue V1, SDValue V2,
10962 ArrayRef<int> Mask,
10963 SelectionDAG &DAG) {
10964 int NumElts = Mask.size();
10965 int NumLanes = VT.getSizeInBits() / 128;
10966 int NumLaneElts = NumElts / NumLanes;
10967 int NumHalfLaneElts = NumLaneElts / 2;
10969 bool MatchLo = true, MatchHi = true;
10970 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
10972 // Determine UNPCKL/UNPCKH type and operand order.
10973 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10974 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
10975 int M = Mask[Lane + Elt];
10979 SDValue &Op = Ops[Elt & 1];
10980 if (M < NumElts && (Op.isUndef() || Op == V1))
10982 else if (NumElts <= M && (Op.isUndef() || Op == V2))
10987 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
10988 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
10989 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
10990 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
10991 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
10992 if (!MatchLo && !MatchHi)
10996 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
// Now check that each pair of elts comes from the same unpack pair
10999 // and set the permute mask based on each pair.
11000 // TODO - Investigate cases where we permute individual elements.
11001 SmallVector<int, 32> PermuteMask(NumElts, -1);
11002 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11003 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11004 int M0 = Mask[Lane + Elt + 0];
11005 int M1 = Mask[Lane + Elt + 1];
11006 if (0 <= M0 && 0 <= M1 &&
11007 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11010 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11012 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11016 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11017 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11018 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11021 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11022 /// permuting the elements of the result in place.
11023 static SDValue lowerShuffleAsByteRotateAndPermute(
11024 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11025 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11026 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11027 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11028 (VT.is512BitVector() && !Subtarget.hasBWI()))
11031 // We don't currently support lane crossing permutes.
11032 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11035 int Scale = VT.getScalarSizeInBits() / 8;
11036 int NumLanes = VT.getSizeInBits() / 128;
11037 int NumElts = VT.getVectorNumElements();
11038 int NumEltsPerLane = NumElts / NumLanes;
11040 // Determine range of mask elts.
11041 bool Blend1 = true;
11042 bool Blend2 = true;
11043 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11044 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11045 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11046 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11047 int M = Mask[Lane + Elt];
11051 Blend1 &= (M == (Lane + Elt));
11052 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11053 M = M % NumEltsPerLane;
11054 Range1.first = std::min(Range1.first, M);
11055 Range1.second = std::max(Range1.second, M);
11058 Blend2 &= (M == (Lane + Elt));
11059 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11060 M = M % NumEltsPerLane;
11061 Range2.first = std::min(Range2.first, M);
11062 Range2.second = std::max(Range2.second, M);
11067 // Bail if we don't need both elements.
11068 // TODO - it might be worth doing this for unary shuffles if the permute
11070 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11071 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11074 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11077 // Rotate the 2 ops so we can access both ranges, then permute the result.
11078 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11079 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11080 SDValue Rotate = DAG.getBitcast(
11081 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11082 DAG.getBitcast(ByteVT, Lo),
11083 DAG.getConstant(Scale * RotAmt, DL, MVT::i8)));
11084 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11085 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11086 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11087 int M = Mask[Lane + Elt];
11091 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11093 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11096 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11099 // Check if the ranges are small enough to rotate from either direction.
11100 if (Range2.second < Range1.first)
11101 return RotateAndPermute(V1, V2, Range1.first, 0);
11102 if (Range1.second < Range2.first)
11103 return RotateAndPermute(V2, V1, Range2.first, NumElts);
11107 /// Generic routine to decompose a shuffle and blend into independent
11108 /// blends and permutes.
11110 /// This matches the extremely common pattern for handling combined
11111 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and blends.
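///
/// As a sketch with illustrative values, the v4i32 mask <2, 7, 1, 4> decomposes
/// into the single-input shuffles V1:<2, u, 1, u> and V2:<u, 3, u, 0> followed
/// by the blend <0, 5, 2, 7>.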
11114 static SDValue lowerShuffleAsDecomposedShuffleBlend(
11115 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11116 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11117 // Shuffle the input elements into the desired positions in V1 and V2 and
11118 // blend them together.
11119 SmallVector<int, 32> V1Mask(Mask.size(), -1);
11120 SmallVector<int, 32> V2Mask(Mask.size(), -1);
11121 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11122 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11123 if (Mask[i] >= 0 && Mask[i] < Size) {
11124 V1Mask[i] = Mask[i];
11126 } else if (Mask[i] >= Size) {
11127 V2Mask[i] = Mask[i] - Size;
11128 BlendMask[i] = i + Size;
11131 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11132 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11133 // the shuffle may be able to fold with a load or other benefit. However, when
11134 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11135 // pre-shuffle first is a better strategy.
11136 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11137 // Only prefer immediate blends to unpack/rotate.
11138 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11141 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11144 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11145 DL, VT, V1, V2, Mask, Subtarget, DAG))
11147 // Unpack/rotate failed - try again with variable blends.
11148 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11153 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11154 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11155 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11158 /// Try to lower a vector shuffle as a rotation.
/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11161 static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11162 int NumElts = Mask.size();
11164 // We need to detect various ways of spelling a rotation:
11165 // [11, 12, 13, 14, 15, 0, 1, 2]
11166 // [-1, 12, 13, 14, -1, -1, 1, -1]
11167 // [-1, -1, -1, -1, -1, -1, 1, 2]
11168 // [ 3, 4, 5, 6, 7, 8, 9, 10]
11169 // [-1, 4, 5, 6, -1, -1, 9, -1]
11170 // [-1, 4, 5, 6, -1, -1, -1, -1]
11173 for (int i = 0; i < NumElts; ++i) {
11175 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11176 "Unexpected mask index.");
11180 // Determine where a rotated vector would have started.
11181 int StartIdx = i - (M % NumElts);
11183 // The identity rotation isn't interesting, stop.
11186 // If we found the tail of a vector the rotation must be the missing
// front. If we found the head of a vector, it must be how much of the head.
11189 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11192 Rotation = CandidateRotation;
11193 else if (Rotation != CandidateRotation)
11194 // The rotations don't match, so we can't match this mask.
11197 // Compute which value this mask is pointing at.
11198 SDValue MaskV = M < NumElts ? V1 : V2;
11200 // Compute which of the two target values this index should be assigned
11201 // to. This reflects whether the high elements are remaining or the low
11202 // elements are remaining.
11203 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11205 // Either set up this value if we've not encountered it before, or check
11206 // that it remains consistent.
11209 else if (TargetV != MaskV)
11210 // This may be a rotation, but it pulls from the inputs in some
11211 // unsupported interleaving.
11215 // Check that we successfully analyzed the mask, and normalize the results.
11216 assert(Rotation != 0 && "Failed to locate a viable rotation!");
11217 assert((Lo || Hi) && "Failed to find a rotated input vector!");
11229 /// Try to lower a vector shuffle as a byte rotation.
11231 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11232 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11233 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
11235 /// does not check for the profitability of lowering either as PALIGNR or
11236 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11237 /// This matches shuffle vectors that look like:
11239 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11241 /// Essentially it concatenates V1 and V2, shifts right by some number of
11242 /// elements, and takes the low elements as the result. Note that while this is
11243 /// specified as a *right shift* because x86 is little-endian, it is a *left
11244 /// rotate* of the vector lanes.
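///
/// For the v8i16 example above the rotation is 3 elements, so this routine
/// would return a byte rotation of 6 (3 elements * 2 bytes per element).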
11245 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11246 ArrayRef<int> Mask) {
11247 // Don't accept any shuffles with zero elements.
11248 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11251 // PALIGNR works on 128-bit lanes.
11252 SmallVector<int, 16> RepeatedMask;
11253 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11256 int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11260 // PALIGNR rotates bytes, so we need to scale the
11261 // rotation based on how many bytes are in the vector lane.
11262 int NumElts = RepeatedMask.size();
11263 int Scale = 16 / NumElts;
11264 return Rotation * Scale;
11267 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11268 SDValue V2, ArrayRef<int> Mask,
11269 const X86Subtarget &Subtarget,
11270 SelectionDAG &DAG) {
11271 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11273 SDValue Lo = V1, Hi = V2;
11274 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11275 if (ByteRotation <= 0)
// Cast the inputs to an i8 vector of the correct length to match PALIGNR or PSLLDQ/PSRLDQ.
11280 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11281 Lo = DAG.getBitcast(ByteVT, Lo);
11282 Hi = DAG.getBitcast(ByteVT, Hi);
11284 // SSSE3 targets can use the palignr instruction.
11285 if (Subtarget.hasSSSE3()) {
11286 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11287 "512-bit PALIGNR requires BWI instructions");
11288 return DAG.getBitcast(
11289 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11290 DAG.getConstant(ByteRotation, DL, MVT::i8)));
11293 assert(VT.is128BitVector() &&
11294 "Rotate-based lowering only supports 128-bit lowering!");
11295 assert(Mask.size() <= 16 &&
11296 "Can shuffle at most 16 bytes in a 128-bit vector!");
11297 assert(ByteVT == MVT::v16i8 &&
11298 "SSE2 rotate lowering only needed for v16i8!");
11300 // Default SSE2 implementation
11301 int LoByteShift = 16 - ByteRotation;
11302 int HiByteShift = ByteRotation;
11304 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11305 DAG.getConstant(LoByteShift, DL, MVT::i8));
11306 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11307 DAG.getConstant(HiByteShift, DL, MVT::i8));
11308 return DAG.getBitcast(VT,
11309 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11312 /// Try to lower a vector shuffle as a dword/qword rotation.
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
11318 /// Essentially it concatenates V1 and V2, shifts right by some number of
11319 /// elements, and takes the low elements as the result. Note that while this is
11320 /// specified as a *right shift* because x86 is little-endian, it is a *left
11321 /// rotate* of the vector lanes.
11322 static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11323 SDValue V2, ArrayRef<int> Mask,
11324 const X86Subtarget &Subtarget,
11325 SelectionDAG &DAG) {
11326 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11327 "Only 32-bit and 64-bit elements are supported!");
11329 // 128/256-bit vectors are only supported with VLX.
11330 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11331 && "VLX required for 128/256-bit vectors");
11333 SDValue Lo = V1, Hi = V2;
11334 int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11338 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11339 DAG.getConstant(Rotation, DL, MVT::i8));
11342 /// Try to lower a vector shuffle as a byte shift sequence.
11343 static SDValue lowerVectorShuffleAsByteShiftMask(
11344 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11345 const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11346 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11347 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11349 // We need a shuffle that has zeros at one/both ends and a sequential
11350 // shuffle from one source within.
11351 unsigned ZeroLo = Zeroable.countTrailingOnes();
11352 unsigned ZeroHi = Zeroable.countLeadingOnes();
11353 if (!ZeroLo && !ZeroHi)
11356 unsigned NumElts = Mask.size();
11357 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11358 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11361 unsigned Scale = VT.getScalarSizeInBits() / 8;
11362 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11363 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11364 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11367 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11368 Res = DAG.getBitcast(MVT::v16i8, Res);
11370 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11371 // inner sequential set of elements, possibly offset:
11372 // 01234567 --> zzzzzz01 --> 1zzzzzzz
11373 // 01234567 --> 4567zzzz --> zzzzz456
11374 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11376 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11377 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11378 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11379 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11380 DAG.getConstant(Scale * ZeroHi, DL, MVT::i8));
11381 } else if (ZeroHi == 0) {
11382 unsigned Shift = Mask[ZeroLo] % NumElts;
11383 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11384 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11385 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11386 DAG.getConstant(Scale * ZeroLo, DL, MVT::i8));
11387 } else if (!Subtarget.hasSSSE3()) {
// If we don't have PSHUFB then it's worth avoiding an AND constant mask
11389 // by performing 3 byte shifts. Shuffle combining can kick in above that.
11390 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11391 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11392 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11393 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11394 Shift += Mask[ZeroLo] % NumElts;
11395 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11396 DAG.getConstant(Scale * Shift, DL, MVT::i8));
11397 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11398 DAG.getConstant(Scale * ZeroLo, DL, MVT::i8));
11402 return DAG.getBitcast(VT, Res);
11405 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11407 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11408 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11409 /// matches elements from one of the input vectors shuffled to the left or
11410 /// right with zeroable elements 'shifted in'. It handles both the strictly
11411 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11412 /// quad word lane.
/// PSLL : (little-endian) left bit shift.
11415 /// [ zz, 0, zz, 2 ]
11416 /// [ -1, 4, zz, -1 ]
11417 /// PSRL : (little-endian) right bit shift.
11418 /// [ 1, zz, 3, zz]
11419 /// [ -1, -1, 7, zz]
11420 /// PSLLDQ : (little-endian) left byte shift
11421 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
11422 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
11423 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
11424 /// PSRLDQ : (little-endian) right byte shift
11425 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
11426 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
11427 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
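///
/// For example (illustrative), the v4i32 mask [ zz, 0, zz, 2 ] above matches
/// with Scale == 2, i.e. the vector is viewed as v2i64 and shifted left by
/// 32 bits (VSHLI / PSLLQ).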
11428 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11429 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11430 int MaskOffset, const APInt &Zeroable,
11431 const X86Subtarget &Subtarget) {
11432 int Size = Mask.size();
11433 unsigned SizeInBits = Size * ScalarSizeInBits;
11435 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11436 for (int i = 0; i < Size; i += Scale)
11437 for (int j = 0; j < Shift; ++j)
11438 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11444 auto MatchShift = [&](int Shift, int Scale, bool Left) {
11445 for (int i = 0; i != Size; i += Scale) {
11446 unsigned Pos = Left ? i + Shift : i;
11447 unsigned Low = Left ? i : i + Shift;
11448 unsigned Len = Scale - Shift;
11449 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11453 int ShiftEltBits = ScalarSizeInBits * Scale;
11454 bool ByteShift = ShiftEltBits > 64;
11455 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11456 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11457 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
// Normalize the scale for byte shifts to still produce an i64 element type.
11461 Scale = ByteShift ? Scale / 2 : Scale;
11463 // We need to round trip through the appropriate type for the shift.
11464 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11465 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11466 : MVT::getVectorVT(ShiftSVT, Size / Scale);
11467 return (int)ShiftAmt;
11470 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11471 // keep doubling the size of the integer elements up to that. We can
11472 // then shift the elements of the integer vector by whole multiples of
11473 // their width within the elements of the larger integer vector. Test each
11474 // multiple to see if we can find a match with the moved element indices
11475 // and that the shifted in elements are all zeroable.
11476 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11477 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11478 for (int Shift = 1; Shift != Scale; ++Shift)
11479 for (bool Left : {true, false})
11480 if (CheckZeros(Shift, Scale, Left)) {
11481 int ShiftAmt = MatchShift(Shift, Scale, Left);
11490 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11491 SDValue V2, ArrayRef<int> Mask,
11492 const APInt &Zeroable,
11493 const X86Subtarget &Subtarget,
11494 SelectionDAG &DAG) {
11495 int Size = Mask.size();
11496 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11502 // Try to match shuffle against V1 shift.
11503 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11504 Mask, 0, Zeroable, Subtarget);
11506 // If V1 failed, try to match shuffle against V2 shift.
11507 if (ShiftAmt < 0) {
11508 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11509 Mask, Size, Zeroable, Subtarget);
11516 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11517 "Illegal integer vector type");
11518 V = DAG.getBitcast(ShiftVT, V);
11519 V = DAG.getNode(Opcode, DL, ShiftVT, V,
11520 DAG.getConstant(ShiftAmt, DL, MVT::i8));
11521 return DAG.getBitcast(VT, V);
11524 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11525 // Remainder of lower half result is zero and upper half is all undef.
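// For example (illustrative values), a v8i16 mask <2, 3, zz, zz, u, u, u, u>
// matches with Len == 2 and Idx == 2, i.e. BitLen == 32 and BitIdx == 32.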
11526 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11527 ArrayRef<int> Mask, uint64_t &BitLen,
11528 uint64_t &BitIdx, const APInt &Zeroable) {
11529 int Size = Mask.size();
11530 int HalfSize = Size / 2;
11531 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11532 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
11534 // Upper half must be undefined.
11535 if (!isUndefUpperHalf(Mask))
11538 // Determine the extraction length from the part of the
11539 // lower half that isn't zeroable.
11540 int Len = HalfSize;
11541 for (; Len > 0; --Len)
11542 if (!Zeroable[Len - 1])
11544 assert(Len > 0 && "Zeroable shuffle mask");
11546 // Attempt to match first Len sequential elements from the lower half.
11549 for (int i = 0; i != Len; ++i) {
11551 if (M == SM_SentinelUndef)
11553 SDValue &V = (M < Size ? V1 : V2);
11556 // The extracted elements must start at a valid index and all mask
11557 // elements must be in the lower half.
11558 if (i > M || M >= HalfSize)
11561 if (Idx < 0 || (Src == V && Idx == (M - i))) {
11569 if (!Src || Idx < 0)
11572 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11573 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11574 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11579 // INSERTQ: Extract lowest Len elements from lower half of second source and
11580 // insert over first source, starting at Idx.
11581 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
11582 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11583 ArrayRef<int> Mask, uint64_t &BitLen,
11584 uint64_t &BitIdx) {
11585 int Size = Mask.size();
11586 int HalfSize = Size / 2;
11587 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11589 // Upper half must be undefined.
11590 if (!isUndefUpperHalf(Mask))
11593 for (int Idx = 0; Idx != HalfSize; ++Idx) {
11596 // Attempt to match first source from mask before insertion point.
11597 if (isUndefInRange(Mask, 0, Idx)) {
11599 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11601 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11607 // Extend the extraction length looking to match both the insertion of
11608 // the second source and the remaining elements of the first.
11609 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11611 int Len = Hi - Idx;
11613 // Match insertion.
11614 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11616 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11622 // Match the remaining elements of the lower half.
11623 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11625 } else if ((!Base || (Base == V1)) &&
11626 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11628 } else if ((!Base || (Base == V2)) &&
11629 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11636 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11637 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11647 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11648 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11649 SDValue V2, ArrayRef<int> Mask,
11650 const APInt &Zeroable, SelectionDAG &DAG) {
11651 uint64_t BitLen, BitIdx;
11652 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11653 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11654 DAG.getConstant(BitLen, DL, MVT::i8),
11655 DAG.getConstant(BitIdx, DL, MVT::i8));
11657 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11658 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11659 V2 ? V2 : DAG.getUNDEF(VT),
11660 DAG.getConstant(BitLen, DL, MVT::i8),
11661 DAG.getConstant(BitIdx, DL, MVT::i8));
11666 /// Lower a vector shuffle as a zero or any extension.
11668 /// Given a specific number of elements, element bit width, and extension
11669 /// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and may
/// begin at an offset element index in the input; to avoid excess shuffling,
/// the offset must either be in the bottom lane or at the start of a higher
/// lane. All extended elements must be from the same input vector.
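///
/// As a sketch with illustrative parameters: with Scale == 2 and Offset == 0 on
/// a v8i16 input, the low four i16 elements are zero-extended into the four i32
/// lanes of the result, matching a shuffle mask of the form
/// <0, zz, 1, zz, 2, zz, 3, zz>.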
11675 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11676 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11677 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11678 assert(Scale > 1 && "Need a scale to extend.");
11679 int EltBits = VT.getScalarSizeInBits();
11680 int NumElements = VT.getVectorNumElements();
11681 int NumEltsPerLane = 128 / EltBits;
11682 int OffsetLane = Offset / NumEltsPerLane;
11683 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11684 "Only 8, 16, and 32 bit elements can be extended.");
11685 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11686 assert(0 <= Offset && "Extension offset must be positive.");
11687 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11688 "Extension offset must be in the first lane or start an upper lane.");
// Check that an index is in the same lane as the base offset.
11691 auto SafeOffset = [&](int Idx) {
11692 return OffsetLane == (Idx / NumEltsPerLane);
11695 // Shift along an input so that the offset base moves to the first element.
11696 auto ShuffleOffset = [&](SDValue V) {
11700 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11701 for (int i = 0; i * Scale < NumElements; ++i) {
11702 int SrcIdx = i + Offset;
11703 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11705 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11708 // Found a valid zext mask! Try various lowering strategies based on the
11709 // input type and available ISA extensions.
11710 // TODO: Add AnyExt support.
11711 if (Subtarget.hasSSE41()) {
11712 // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
11713 // PUNPCK will catch this in a later shuffle match.
11714 if (Offset && Scale == 2 && VT.is128BitVector())
11716 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11717 NumElements / Scale);
11718 InputV = ShuffleOffset(InputV);
11719 InputV = getExtendInVec(ISD::ZERO_EXTEND, DL, ExtVT, InputV, DAG);
11720 return DAG.getBitcast(VT, InputV);
11723 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11725 // For any extends we can cheat for larger element sizes and use shuffle
11726 // instructions that can fold with a load and/or copy.
11727 if (AnyExt && EltBits == 32) {
11728 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11730 return DAG.getBitcast(
11731 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11732 DAG.getBitcast(MVT::v4i32, InputV),
11733 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11735 if (AnyExt && EltBits == 16 && Scale > 2) {
11736 int PSHUFDMask[4] = {Offset / 2, -1,
11737 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
11738 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11739 DAG.getBitcast(MVT::v4i32, InputV),
11740 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
11741 int PSHUFWMask[4] = {1, -1, -1, -1};
11742 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
11743 return DAG.getBitcast(
11744 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
11745 DAG.getBitcast(MVT::v8i16, InputV),
11746 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
// The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes to 64 bits.
11751 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
11752 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
11753 assert(VT.is128BitVector() && "Unexpected vector width!");
11755 int LoIdx = Offset * EltBits;
11756 SDValue Lo = DAG.getBitcast(
11757 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11758 DAG.getConstant(EltBits, DL, MVT::i8),
11759 DAG.getConstant(LoIdx, DL, MVT::i8)));
11761 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
11762 return DAG.getBitcast(VT, Lo);
11764 int HiIdx = (Offset + 1) * EltBits;
11765 SDValue Hi = DAG.getBitcast(
11766 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11767 DAG.getConstant(EltBits, DL, MVT::i8),
11768 DAG.getConstant(HiIdx, DL, MVT::i8)));
11769 return DAG.getBitcast(VT,
11770 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
11773 // If this would require more than 2 unpack instructions to expand, use
11774 // pshufb when available. We can only use more than 2 unpack instructions
11775 // when zero extending i8 elements which also makes it easier to use pshufb.
11776 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
11777 assert(NumElements == 16 && "Unexpected byte vector width!");
11778 SDValue PSHUFBMask[16];
11779 for (int i = 0; i < 16; ++i) {
11780 int Idx = Offset + (i / Scale);
11781 PSHUFBMask[i] = DAG.getConstant(
11782 (i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
11784 InputV = DAG.getBitcast(MVT::v16i8, InputV);
11785 return DAG.getBitcast(
11786 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
11787 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
11790 // If we are extending from an offset, ensure we start on a boundary that
11791 // we can unpack from.
11792 int AlignToUnpack = Offset % (NumElements / Scale);
11793 if (AlignToUnpack) {
11794 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11795 for (int i = AlignToUnpack; i < NumElements; ++i)
11796 ShMask[i - AlignToUnpack] = i;
11797 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
11798 Offset -= AlignToUnpack;
11801 // Otherwise emit a sequence of unpacks.
11803 unsigned UnpackLoHi = X86ISD::UNPCKL;
11804 if (Offset >= (NumElements / 2)) {
11805 UnpackLoHi = X86ISD::UNPCKH;
11806 Offset -= (NumElements / 2);
11809 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
11810 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
11811 : getZeroVector(InputVT, Subtarget, DAG, DL);
11812 InputV = DAG.getBitcast(InputVT, InputV);
11813 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
11817 } while (Scale > 1);
11818 return DAG.getBitcast(VT, InputV);
11821 /// Try to lower a vector shuffle as a zero extension on any microarch.
11823 /// This routine will try to do everything in its power to cleverly lower
11824 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
11826 /// match this pattern. It will use all of the micro-architectural details it
11827 /// can to emit an efficient lowering. It handles both blends with all-zero
11828 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
11829 /// masking out later).
11831 /// The reason we have dedicated lowering for zext-style shuffles is that they
11832 /// are both incredibly common and often quite performance sensitive.
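///
/// For example (illustrative mask), the v16i8 shuffle
/// <0, zz, zz, zz, 1, zz, zz, zz, 2, zz, zz, zz, 3, zz, zz, zz> matches with
/// Scale == 4 and can be lowered as a byte-to-dword zero extension
/// (e.g. PMOVZXBD on SSE4.1 targets).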
11833 static SDValue lowerShuffleAsZeroOrAnyExtend(
11834 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11835 const APInt &Zeroable, const X86Subtarget &Subtarget,
11836 SelectionDAG &DAG) {
11837 int Bits = VT.getSizeInBits();
11838 int NumLanes = Bits / 128;
11839 int NumElements = VT.getVectorNumElements();
11840 int NumEltsPerLane = NumElements / NumLanes;
11841 assert(VT.getScalarSizeInBits() <= 32 &&
11842 "Exceeds 32-bit integer zero extension limit");
11843 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
// Define a helper function to check a particular ext-scale and lower to it if valid.
11847 auto Lower = [&](int Scale) -> SDValue {
11849 bool AnyExt = true;
11852 for (int i = 0; i < NumElements; ++i) {
11855 continue; // Valid anywhere but doesn't tell us anything.
11856 if (i % Scale != 0) {
// Each of the extended elements needs to be zeroable.
// We are no longer in the anyext case.
// The base elements need to be consecutive indices into the
// same input vector.
11868 SDValue V = M < NumElements ? V1 : V2;
11869 M = M % NumElements;
11872 Offset = M - (i / Scale);
11873 } else if (InputV != V)
11874 return SDValue(); // Flip-flopping inputs.
// Offset must start in the lowest 128-bit lane or at the start of an upper lane.
11878 // FIXME: Is it ever worth allowing a negative base offset?
11879 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
11880 (Offset % NumEltsPerLane) == 0))
// If we are offsetting, all referenced entries must come from the same lane.
11885 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
11888 if ((M % NumElements) != (Offset + (i / Scale)))
11889 return SDValue(); // Non-consecutive strided elements.
11893 // If we fail to find an input, we have a zero-shuffle which should always
11894 // have already been handled.
11895 // FIXME: Maybe handle this here in case during blending we end up with one?
// If we are offsetting, don't extend if we only match a single input; we
11900 // can always do better by using a basic PSHUF or PUNPCK.
11901 if (Offset != 0 && Matches < 2)
11904 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
11905 InputV, Mask, Subtarget, DAG);
11908 // The widest scale possible for extending is to a 64-bit integer.
11909 assert(Bits % 64 == 0 &&
11910 "The number of bits in a vector must be divisible by 64 on x86!");
11911 int NumExtElements = Bits / 64;
// Each iteration, try extending the elements half as much, but into twice as many elements.
11915 for (; NumExtElements < NumElements; NumExtElements *= 2) {
11916 assert(NumElements % NumExtElements == 0 &&
11917 "The input vector size must be divisible by the extended size.");
11918 if (SDValue V = Lower(NumElements / NumExtElements))
11922 // General extends failed, but 128-bit vectors may be able to use MOVQ.
11926 // Returns one of the source operands if the shuffle can be reduced to a
11927 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
11928 auto CanZExtLowHalf = [&]() {
11929 for (int i = NumElements / 2; i != NumElements; ++i)
11932 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
11934 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
11939 if (SDValue V = CanZExtLowHalf()) {
11940 V = DAG.getBitcast(MVT::v2i64, V);
11941 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
11942 return DAG.getBitcast(VT, V);
11945 // No viable ext lowering found.
11949 /// Try to get a scalar value for a specific element of a vector.
11951 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
11952 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
11953 SelectionDAG &DAG) {
11954 MVT VT = V.getSimpleValueType();
11955 MVT EltVT = VT.getVectorElementType();
11956 V = peekThroughBitcasts(V);
// If the bitcasts change the element size, we can't extract an equivalent
// element from it.
11960 MVT NewVT = V.getSimpleValueType();
11961 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
11964 if (V.getOpcode() == ISD::BUILD_VECTOR ||
11965 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
11966 // Ensure the scalar operand is the same size as the destination.
11967 // FIXME: Add support for scalar truncation where possible.
11968 SDValue S = V.getOperand(Idx);
11969 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
11970 return DAG.getBitcast(EltVT, S);
11976 /// Helper to test for a load that can be folded with x86 shuffles.
11978 /// This is particularly important because the set of instructions varies
11979 /// significantly based on whether the operand is a load or not.
11980 static bool isShuffleFoldableLoad(SDValue V) {
11981 V = peekThroughBitcasts(V);
11982 return ISD::isNON_EXTLoad(V.getNode());
11985 /// Try to lower insertion of a single element into a zero vector.
/// This is a common pattern for which we have especially efficient
/// lowering patterns across all subtarget feature sets.
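///
/// For example (illustrative), a v4f32 shuffle with mask <4, 1, 2, 3> in which
/// elements 1-3 of V1 are known to be zero reduces to inserting the low element
/// of V2 into an otherwise zero vector.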
11989 static SDValue lowerShuffleAsElementInsertion(
11990 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11991 const APInt &Zeroable, const X86Subtarget &Subtarget,
11992 SelectionDAG &DAG) {
11994 MVT EltVT = VT.getVectorElementType();
11997 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
11999 bool IsV1Zeroable = true;
12000 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12001 if (i != V2Index && !Zeroable[i]) {
12002 IsV1Zeroable = false;
12006 // Check for a single input from a SCALAR_TO_VECTOR node.
12007 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12008 // all the smarts here sunk into that routine. However, the current
12009 // lowering of BUILD_VECTOR makes that nearly impossible until the old
12010 // vector shuffle lowering is dead.
12011 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12013 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12014 // We need to zext the scalar if it is smaller than an i32.
12015 V2S = DAG.getBitcast(EltVT, V2S);
12016 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
// Using zext to expand a narrow element won't work for non-zero insertions.
12022 // Zero-extend directly to i32.
12023 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12024 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12026 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12027 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12028 EltVT == MVT::i16) {
12029 // Either not inserting from the low element of the input or the input
12030 // element size is too small to use VZEXT_MOVL to clear the high bits.
12034 if (!IsV1Zeroable) {
12035 // If V1 can't be treated as a zero vector we have fewer options to lower
12036 // this. We can't support integer vectors or non-zero targets cheaply, and
12037 // the V1 elements can't be permuted in any way.
12038 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12039 if (!VT.isFloatingPoint() || V2Index != 0)
12041 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12042 V1Mask[V2Index] = -1;
12043 if (!isNoopShuffleMask(V1Mask))
12045 if (!VT.is128BitVector())
12048 // Otherwise, use MOVSD or MOVSS.
12049 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12050 "Only two types of floating point element types to handle!");
12051 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12055 // This lowering only works for the low element with floating point vectors.
12056 if (VT.isFloatingPoint() && V2Index != 0)
12059 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12061 V2 = DAG.getBitcast(VT, V2);
12063 if (V2Index != 0) {
12064 // If we have 4 or fewer lanes we can cheaply shuffle the element into
12065 // the desired position. Otherwise it is more efficient to do a vector
12066 // shift left. We know that we can do a vector shift left because all
12067 // the inputs are zero.
12068 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12069 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12070 V2Shuffle[V2Index] = 0;
12071 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12073 V2 = DAG.getBitcast(MVT::v16i8, V2);
12075 X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12076 DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12077 V2 = DAG.getBitcast(VT, V2);
12083 /// Try to lower broadcast of a single - truncated - integer element,
12084 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12086 /// This assumes we have AVX2.
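///
/// For example (illustrative), broadcasting i16 element 1 of a build_vector of
/// i32 scalars uses Scale == 2: it takes scalar operand 0, shifts it right by
/// 16 bits, truncates to i16 and broadcasts the result.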
12087 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12089 const X86Subtarget &Subtarget,
12090 SelectionDAG &DAG) {
12091 assert(Subtarget.hasAVX2() &&
12092 "We can only lower integer broadcasts with AVX2!");
12094 EVT EltVT = VT.getVectorElementType();
12095 EVT V0VT = V0.getValueType();
12097 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12098 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12100 EVT V0EltVT = V0VT.getVectorElementType();
12101 if (!V0EltVT.isInteger())
12104 const unsigned EltSize = EltVT.getSizeInBits();
12105 const unsigned V0EltSize = V0EltVT.getSizeInBits();
12107 // This is only a truncation if the original element type is larger.
12108 if (V0EltSize <= EltSize)
12111 assert(((V0EltSize % EltSize) == 0) &&
12112 "Scalar type sizes must all be powers of 2 on x86!");
12114 const unsigned V0Opc = V0.getOpcode();
12115 const unsigned Scale = V0EltSize / EltSize;
12116 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12118 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12119 V0Opc != ISD::BUILD_VECTOR)
12122 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12124 // If we're extracting non-least-significant bits, shift so we can truncate.
12125 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12126 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12127 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12128 if (const int OffsetIdx = BroadcastIdx % Scale)
12129 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12130 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12132 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12133 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12136 /// Test whether this can be lowered with a single SHUFPS instruction.
12138 /// This is used to disable more specialized lowerings when the shufps lowering
/// happens to be efficient.
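///
/// For example (illustrative), <0, 3, 5, 6> can be a single SHUFPS (low half
/// from V1, high half from V2), whereas <0, 5, 2, 6> cannot, since its low half
/// needs elements from both inputs.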
12140 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12141 // This routine only handles 128-bit shufps.
12142 assert(Mask.size() == 4 && "Unsupported mask size!");
12143 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12144 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12145 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12146 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
// To lower with a single SHUFPS we need the low half and the high half
// to each require only a single input.
12150 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12152 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12158 /// If we are extracting two 128-bit halves of a vector and shuffling the
12159 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12160 /// multi-shuffle lowering.
12161 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12162 SDValue N1, ArrayRef<int> Mask,
12163 SelectionDAG &DAG) {
12164 EVT VT = N0.getValueType();
12165 assert((VT.is128BitVector() &&
12166 (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12167 "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12169 // Check that both sources are extracts of the same source vector.
12170 if (!N0.hasOneUse() || !N1.hasOneUse() ||
12171 N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12172 N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12173 N0.getOperand(0) != N1.getOperand(0))
12176 SDValue WideVec = N0.getOperand(0);
12177 EVT WideVT = WideVec.getValueType();
12178 if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
12179 !isa<ConstantSDNode>(N1.getOperand(1)))
12182 // Match extracts of each half of the wide source vector. Commute the shuffle
12183 // if the extract of the low half is N1.
12184 unsigned NumElts = VT.getVectorNumElements();
12185 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
12186 const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12187 const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12188 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12189 ShuffleVectorSDNode::commuteMask(NewMask);
12190 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12193 // Final bailout: if the mask is simple, we are better off using an extract
12194 // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12195 // because that avoids a constant load from memory.
12196 if (NumElts == 4 &&
12197 (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
12200 // Extend the shuffle mask with undef elements.
12201 NewMask.append(NumElts, -1);
12203 // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12204 SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12206 // This is free: ymm -> xmm.
12207 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12208 DAG.getIntPtrConstant(0, DL));
12211 /// Try to lower broadcast of a single element.
12213 /// For convenience, this code also bundles all of the subtarget feature set
12214 /// filtering. While a little annoying to re-dispatch on type here, there isn't
12215 /// a convenient way to factor it out.
12216 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12217 SDValue V2, ArrayRef<int> Mask,
12218 const X86Subtarget &Subtarget,
12219 SelectionDAG &DAG) {
12220 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12221 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
12222 (Subtarget.hasAVX2() && VT.isInteger())))
12225 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12226 // we can only broadcast from a register with AVX2.
12227 unsigned NumElts = Mask.size();
12228 unsigned NumEltBits = VT.getScalarSizeInBits();
12229 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12231 : X86ISD::VBROADCAST;
12232 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12234 // Check that the mask is a broadcast.
12235 int BroadcastIdx = -1;
12236 for (int i = 0; i != (int)NumElts; ++i) {
12237 SmallVector<int, 8> BroadcastMask(NumElts, i);
12238 if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
12244 if (BroadcastIdx < 0)
12246 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12247 "a sorted mask where the broadcast "
12250 // Go up the chain of (vector) values to find a scalar load that we can
12251 // combine with the broadcast.
12252 int BitOffset = BroadcastIdx * NumEltBits;
12255 switch (V.getOpcode()) {
12256 case ISD::BITCAST: {
12257 V = V.getOperand(0);
12260 case ISD::CONCAT_VECTORS: {
12261 int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12262 int OpIdx = BitOffset / OpBitWidth;
12263 V = V.getOperand(OpIdx);
12264 BitOffset %= OpBitWidth;
12267 case ISD::INSERT_SUBVECTOR: {
12268 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12269 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
12273 int EltBitWidth = VOuter.getScalarValueSizeInBits();
12274 int Idx = (int)ConstantIdx->getZExtValue();
12275 int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12276 int BeginOffset = Idx * EltBitWidth;
12277 int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12278 if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12279 BitOffset -= BeginOffset;
12289 assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12290 BroadcastIdx = BitOffset / NumEltBits;
12292 // Do we need to bitcast the source to retrieve the original broadcast index?
12293 bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12295 // Check if this is a broadcast of a scalar. We special case lowering
12296 // for scalars so that we can more effectively fold with loads.
12297 // If the original value has a larger element type than the shuffle, the
12298 // broadcast element is in essence truncated. Make that explicit to ease
12300 if (BitCastSrc && VT.isInteger())
12301 if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12302 DL, VT, V, BroadcastIdx, Subtarget, DAG))
12303 return TruncBroadcast;
12305 MVT BroadcastVT = VT;
12307 // Also check the simpler case, where we can directly reuse the scalar.
12309 ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12310 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12311 V = V.getOperand(BroadcastIdx);
12313 // If we can't broadcast from a register, check that the input is a load.
12314 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12316 } else if (MayFoldLoad(V) && !cast<LoadSDNode>(V)->isVolatile()) {
12317 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12318 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
12319 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
12320 Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
12325 // If we are broadcasting a load that is only used by the shuffle
12326 // then we can reduce the vector load to the broadcasted scalar load.
12327 LoadSDNode *Ld = cast<LoadSDNode>(V);
12328 SDValue BaseAddr = Ld->getOperand(1);
12329 EVT SVT = BroadcastVT.getScalarType();
12330 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12331 assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12332 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
12333 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12334 DAG.getMachineFunction().getMachineMemOperand(
12335 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12336 DAG.makeEquivalentMemoryOrdering(Ld, V);
12337 } else if (!BroadcastFromReg) {
12338 // We can't broadcast from a vector register.
12340 } else if (BitOffset != 0) {
12341 // We can only broadcast from the zero-element of a vector register,
12342 // but it can be advantageous to broadcast from the zero-element of a
12344 if (!VT.is256BitVector() && !VT.is512BitVector())
12347 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12348 if (VT == MVT::v4f64 || VT == MVT::v4i64)
12351 // Only broadcast the zero-element of a 128-bit subvector.
12352 if ((BitOffset % 128) != 0)
12355 assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12356 "Unexpected bit-offset");
12357 assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12358 "Unexpected vector size");
12359 unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12360 V = extract128BitVector(V, ExtractIdx, DAG, DL);
12363 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
12364 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
12365 DAG.getBitcast(MVT::f64, V));
12367 // Bitcast back to the same scalar type as BroadcastVT.
12368 if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
12369 assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
12370 "Unexpected vector element size");
12372 if (V.getValueType().isVector()) {
12373 unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12374 ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
12376 ExtVT = BroadcastVT.getScalarType();
12378 V = DAG.getBitcast(ExtVT, V);
12381 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12382 if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
12383 V = DAG.getBitcast(MVT::f64, V);
12384 unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
12385 BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
12388 // We only support broadcasting from 128-bit vectors to minimize the
12389 // number of patterns we need to deal with in isel. So extract down to
12390 // 128-bits, removing as many bitcasts as possible.
12391 if (V.getValueSizeInBits() > 128) {
12392 MVT ExtVT = V.getSimpleValueType().getScalarType();
12393 ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
12394 V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12395 V = DAG.getBitcast(ExtVT, V);
12398 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12401 // Check for whether we can use INSERTPS to perform the shuffle. We only use
12402 // INSERTPS when the V1 elements are already in the correct locations
12403 // because otherwise we can just always use two SHUFPS instructions which
12404 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12405 // perform INSERTPS if a single V1 element is out of place and all V2
12406 // elements are zeroable.
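// For example (illustrative), the v4f32 mask <0, 6, 2, 3> keeps V1 elements 0,
// 2 and 3 in place and inserts element 2 of V2 into lane 1, i.e. an INSERTPS
// immediate of 0x90 (source 2, destination 1, no zero mask).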
12407 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12408 unsigned &InsertPSMask,
12409 const APInt &Zeroable,
12410 ArrayRef<int> Mask, SelectionDAG &DAG) {
12411 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12412 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12413 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12415 // Attempt to match INSERTPS with one element from VA or VB being
// inserted into VA (or undef). If successful, V1, V2 and InsertPSMask are updated.
12418 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12419 ArrayRef<int> CandidateMask) {
12420 unsigned ZMask = 0;
12421 int VADstIndex = -1;
12422 int VBDstIndex = -1;
12423 bool VAUsedInPlace = false;
12425 for (int i = 0; i < 4; ++i) {
12426 // Synthesize a zero mask from the zeroable elements (includes undefs).
12432 // Flag if we use any VA inputs in place.
12433 if (i == CandidateMask[i]) {
12434 VAUsedInPlace = true;
12438 // We can only insert a single non-zeroable element.
12439 if (VADstIndex >= 0 || VBDstIndex >= 0)
12442 if (CandidateMask[i] < 4) {
12443 // VA input out of place for insertion.
12446 // VB input for insertion.
12451 // Don't bother if we have no (non-zeroable) element for insertion.
12452 if (VADstIndex < 0 && VBDstIndex < 0)
12455 // Determine element insertion src/dst indices. The src index is from the
12456 // start of the inserted vector, not the start of the concatenated vector.
12457 unsigned VBSrcIndex = 0;
12458 if (VADstIndex >= 0) {
12459 // If we have a VA input out of place, we use VA as the V2 element
12460 // insertion and don't use the original V2 at all.
12461 VBSrcIndex = CandidateMask[VADstIndex];
12462 VBDstIndex = VADstIndex;
12465 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12468 // If no V1 inputs are used in place, then the result is created only from
12469 // the zero mask and the V2 insertion - so remove V1 dependency.
12470 if (!VAUsedInPlace)
12471 VA = DAG.getUNDEF(MVT::v4f32);
12473 // Update V1, V2 and InsertPSMask accordingly.
12477 // Insert the V2 element into the desired position.
12478 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12479 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
return true;
};
12483 if (matchAsInsertPS(V1, V2, Mask))
return true;
12486 // Commute and try again.
12487 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
12488 ShuffleVectorSDNode::commuteMask(CommutedMask);
12489 if (matchAsInsertPS(V2, V1, CommutedMask))
return true;

return false;
}
12495 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12496 ArrayRef<int> Mask, const APInt &Zeroable,
12497 SelectionDAG &DAG) {
12498 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12499 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12501 // Attempt to match the insertps pattern.
12502 unsigned InsertPSMask;
12503 if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
return SDValue();
12506 // Insert the V2 element into the desired position.
12507 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12508 DAG.getConstant(InsertPSMask, DL, MVT::i8));
12511 /// Try to lower a shuffle as a permute of the inputs followed by an
12512 /// UNPCK instruction.
12514 /// This specifically targets cases where we end up with alternating between
12515 /// the two inputs, and so can permute them into something that feeds a single
12516 /// UNPCK instruction. Note that this routine only targets integer vectors
12517 /// because for floating point vectors we have a generalized SHUFPS lowering
12518 /// strategy that handles everything that doesn't *exactly* match an unpack,
12519 /// making this clever lowering unnecessary.
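/// As a sketch of the idea (illustrative example, not from the original
/// comments): the v4i32 mask <0,4,2,6> can be lowered by permuting V1 to
/// <0,2,u,u> and V2 to <0,2,u,u>, after which a single UNPCKLDQ interleaves
/// the low halves into <V1[0],V2[0],V1[2],V2[2]>, i.e. exactly <0,4,2,6>.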
12520 static SDValue lowerShuffleAsPermuteAndUnpack(
12521 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12522 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12523 assert(!VT.isFloatingPoint() &&
12524 "This routine only supports integer vectors.");
12525 assert(VT.is128BitVector() &&
12526 "This routine only works on 128-bit vectors.");
12527 assert(!V2.isUndef() &&
12528 "This routine should only be used when blending two inputs.");
12529 assert(Mask.size() >= 2 && "Single element masks are invalid.");
12531 int Size = Mask.size();
int NumLoInputs =
12534 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
int NumHiInputs =
12536 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
12538 bool UnpackLo = NumLoInputs >= NumHiInputs;
12540 auto TryUnpack = [&](int ScalarSize, int Scale) {
12541 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
12542 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
12544 for (int i = 0; i < Size; ++i) {
12548 // Each element of the unpack contains Scale elements from this mask.
12549 int UnpackIdx = i / Scale;
12551 // We only handle the case where V1 feeds the first slots of the unpack.
12552 // We rely on canonicalization to ensure this is the case.
12553 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
12556 // Setup the mask for this input. The indexing is tricky as we have to
12557 // handle the unpack stride.
12558 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
12559 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
Mask[i] % Size;
}
12563 // If we will have to shuffle both inputs to use the unpack, check whether
12564 // we can just unpack first and shuffle the result. If so, skip this unpack.
12565 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
12566 !isNoopShuffleMask(V2Mask))
return SDValue();
12569 // Shuffle the inputs into place.
12570 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
12571 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
12573 // Cast the inputs to the type we will use to unpack them.
12574 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
12575 V1 = DAG.getBitcast(UnpackVT, V1);
12576 V2 = DAG.getBitcast(UnpackVT, V2);
12578 // Unpack the inputs and cast the result back to the desired type.
12579 return DAG.getBitcast(
12580 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
12581 UnpackVT, V1, V2));
12584 // We try each unpack from the largest to the smallest to try and find one
12585 // that fits this mask.
12586 int OrigScalarSize = VT.getScalarSizeInBits();
12587 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
12588 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
return Unpack;
12591 // If we're shuffling with a zero vector then we're better off not doing
12592 // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
12593 if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
12594 ISD::isBuildVectorAllZeros(V2.getNode()))
return SDValue();
12597 // If none of the unpack-rooted lowerings worked (or were profitable) try an
// initial unpack.
12599 if (NumLoInputs == 0 || NumHiInputs == 0) {
12600 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
12601 "We have to have *some* inputs!");
12602 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
12604 // FIXME: We could consider the total complexity of the permute of each
12605 // possible unpacking. Or at the least we should consider how many
12606 // half-crossings are created.
12607 // FIXME: We could consider commuting the unpacks.
12609 SmallVector<int, 32> PermMask((unsigned)Size, -1);
12610 for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
12614 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
PermMask[i] =
12617 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
}
12619 return DAG.getVectorShuffle(
12620 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
DL, VT, V1, V2),
12622 DAG.getUNDEF(VT), PermMask);
}

return SDValue();
}
12628 /// Handle lowering of 2-lane 64-bit floating point shuffles.
12630 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
12631 /// support for floating point shuffles but not integer shuffles. These
12632 /// instructions will incur a domain crossing penalty on some chips though so
12633 /// it is better to avoid lowering through this for integer vectors where
/// possible.
12635 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12636 const APInt &Zeroable, SDValue V1, SDValue V2,
12637 const X86Subtarget &Subtarget,
12638 SelectionDAG &DAG) {
12639 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12640 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12641 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12643 if (V2.isUndef()) {
12644 // Check for being able to broadcast a single element.
12645 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
12646 Mask, Subtarget, DAG))
12649 // Straight shuffle of a single input vector. Simulate this by using the
12650 // single input as both of the "inputs" to this instruction.
12651 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
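// Illustrative note (added): the 2-bit immediate selects one element per
// result lane - bit 0 picks the element for lane 0 and bit 1 for lane 1. A
// <1, 1> splat-of-the-high-element mask therefore yields 0b11 == 3 here.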
12653 if (Subtarget.hasAVX()) {
12654 // If we have AVX, we can use VPERMILPS which will allow folding a load
12655 // into the shuffle.
12656 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
12657 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
12660 return DAG.getNode(
12661 X86ISD::SHUFP, DL, MVT::v2f64,
12662 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12663 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12664 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
12666 assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12667 assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12668 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12669 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12671 if (Subtarget.hasAVX2())
12672 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12675 // When loading a scalar and then shuffling it into a vector we can often do
12676 // the insertion cheaply.
12677 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12678 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12680 // Try inverting the insertion since for v2 masks it is easy to do and we
12681 // can't reliably sort the mask one way or the other.
12682 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
12683 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
12684 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12685 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12688 // Try to use one of the special instruction patterns to handle two common
12689 // blend patterns if a zero-blend above didn't work.
12690 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
12691 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
12692 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
12693 // We can either use a special instruction to load over the low double or
12694 // to move just the low double.
12695 return DAG.getNode(
12696 X86ISD::MOVSD, DL, MVT::v2f64, V2,
12697 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
12699 if (Subtarget.hasSSE41())
12700 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
12701 Zeroable, Subtarget, DAG))
12704 // Use dedicated unpack instructions for masks that match their pattern.
12705 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
12708 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
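// Illustrative example (added): for the two-input mask <0, 3> this computes
// bit0 = 0 (V1 lane 0) and bit1 = 1 (V2 lane 1), i.e. an immediate of 2.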
12709 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
12710 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
12713 /// Handle lowering of 2-lane 64-bit integer shuffles.
12715 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
12716 /// the integer unit to minimize domain crossing penalties. However, for blends
12717 /// it falls back to the floating point shuffle operation with appropriate bit
/// fiddling.
12719 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12720 const APInt &Zeroable, SDValue V1, SDValue V2,
12721 const X86Subtarget &Subtarget,
12722 SelectionDAG &DAG) {
12723 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12724 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12725 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12727 if (V2.isUndef()) {
12728 // Check for being able to broadcast a single element.
12729 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
12730 Mask, Subtarget, DAG))
12733 // Straight shuffle of a single input vector. For everything from SSE2
12734 // onward this has a single fast instruction with no scary immediates.
12735 // We have to map the mask as it is actually a v4i32 shuffle instruction.
12736 V1 = DAG.getBitcast(MVT::v4i32, V1);
12737 int WidenedMask[4] = {
12738 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
12739 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
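// Illustrative example (added): the v2i64 swap mask <1, 0> widens to the
// v4i32 mask <2, 3, 0, 1>, which getV4X86ShuffleImm8ForMask encodes as the
// classic "pshufd $0x4e" immediate.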
12740 return DAG.getBitcast(
MVT::v2i64,
12742 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
12743 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
12745 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
12746 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
12747 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12748 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12750 if (Subtarget.hasAVX2())
12751 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12754 // Try to use shift instructions.
12755 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
12756 Zeroable, Subtarget, DAG))
12759 // When loading a scalar and then shuffling it into a vector we can often do
12760 // the insertion cheaply.
12761 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12762 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12764 // Try inverting the insertion since for v2 masks it is easy to do and we
12765 // can't reliably sort the mask one way or the other.
12766 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
12767 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12768 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12771 // We have different paths for blend lowering, but they all must use the
12772 // *exact* same predicate.
12773 bool IsBlendSupported = Subtarget.hasSSE41();
12774 if (IsBlendSupported)
12775 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
12776 Zeroable, Subtarget, DAG))
12779 // Use dedicated unpack instructions for masks that match their pattern.
12780 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
12783 // Try to use byte rotation instructions.
12784 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
12785 if (Subtarget.hasSSSE3()) {
12786 if (Subtarget.hasVLX())
12787 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
12791 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
12796 // If we have direct support for blends, we should lower by decomposing into
12797 // a permute. That will be faster than the domain cross.
12798 if (IsBlendSupported)
12799 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
12802 // We implement this with SHUFPD which is pretty lame because it will likely
12803 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
12804 // However, all the alternatives are still more cycles and newer chips don't
12805 // have this problem. It would be really nice if x86 had better shuffles here.
12806 V1 = DAG.getBitcast(MVT::v2f64, V1);
12807 V2 = DAG.getBitcast(MVT::v2f64, V2);
12808 return DAG.getBitcast(MVT::v2i64,
12809 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
12812 /// Lower a vector shuffle using the SHUFPS instruction.
12814 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
12815 /// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
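///
/// As a reminder of the SHUFPS semantics (illustrative, added): the two low
/// result elements are selected from the first operand and the two high
/// result elements from the second operand, two immediate bits per element.
/// The "even elements" mask <0, 2, 4, 6>, for instance, corresponds to
/// shufps $0x88 (fields 0, 2, 0, 2).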
12817 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
12818 ArrayRef<int> Mask, SDValue V1,
12819 SDValue V2, SelectionDAG &DAG) {
12820 SDValue LowV = V1, HighV = V2;
12821 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
12823 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12825 if (NumV2Elements == 1) {
12826 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
12828 // Compute the index adjacent to V2Index and in the same half by toggling
// the low bit.
12830 int V2AdjIndex = V2Index ^ 1;
12832 if (Mask[V2AdjIndex] < 0) {
12833 // Handles all the cases where we have a single V2 element and an undef.
12834 // This will only ever happen in the high lanes because we commute the
12835 // vector otherwise.
12837 std::swap(LowV, HighV);
12838 NewMask[V2Index] -= 4;
12840 // Handle the case where the V2 element ends up adjacent to a V1 element.
12841 // To make this work, blend them together as the first step.
12842 int V1Index = V2AdjIndex;
12843 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
12844 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
12845 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12847 // Now proceed to reconstruct the final blend as we have the necessary
12848 // high or low half formed.
if (V2Index < 2) {
LowV = V2;
HighV = V1;
} else {
HighV = V2;
}
12855 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
12856 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
12858 } else if (NumV2Elements == 2) {
12859 if (Mask[0] < 4 && Mask[1] < 4) {
12860 // Handle the easy case where we have V1 in the low lanes and V2 in the
12864 } else if (Mask[2] < 4 && Mask[3] < 4) {
12865 // We also handle the reversed case because this utility may get called
12866 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
12867 // arrange things in the right direction.
12873 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
12874 // trying to place elements directly, just blend them and set up the final
12875 // shuffle to place them.
12877 // The first two blend mask elements are for V1, the second two are for
// V2.
12879 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
12880 Mask[2] < 4 ? Mask[2] : Mask[3],
12881 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
12882 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
12883 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
12884 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12886 // Now we do a normal shuffle of V1 by giving V1 as both operands to
// the shuffle.
LowV = HighV = V1;
12889 NewMask[0] = Mask[0] < 4 ? 0 : 2;
12890 NewMask[1] = Mask[0] < 4 ? 2 : 0;
12891 NewMask[2] = Mask[2] < 4 ? 1 : 3;
12892 NewMask[3] = Mask[2] < 4 ? 3 : 1;
12895 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
12896 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
12899 /// Lower 4-lane 32-bit floating point shuffles.
12901 /// Uses instructions exclusively from the floating point unit to minimize
12902 /// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
12904 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12905 const APInt &Zeroable, SDValue V1, SDValue V2,
12906 const X86Subtarget &Subtarget,
12907 SelectionDAG &DAG) {
12908 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12909 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12910 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12912 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12914 if (NumV2Elements == 0) {
12915 // Check for being able to broadcast a single element.
12916 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
12917 Mask, Subtarget, DAG))
12920 // Use even/odd duplicate instructions for masks that match their pattern.
12921 if (Subtarget.hasSSE3()) {
12922 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
12923 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
12924 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
12925 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
12928 if (Subtarget.hasAVX()) {
12929 // If we have AVX, we can use VPERMILPS which will allow folding a load
12930 // into the shuffle.
12931 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
12932 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12935 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
12936 // in SSE1 because otherwise they are widened to v2f64 and never get here.
12937 if (!Subtarget.hasSSE2()) {
12938 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
12939 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
12940 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
12941 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
12944 // Otherwise, use a straight shuffle of a single input vector. We pass the
12945 // input vector to both operands to simulate this with a SHUFPS.
12946 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
12947 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12950 if (Subtarget.hasAVX2())
12951 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12954 // There are special ways we can lower some single-element blends. However, we
12955 // have custom ways we can lower more complex single-element blends below that
12956 // we defer to if both this and BLENDPS fail to match, so restrict this to
12957 // when the V2 input is targeting element 0 of the mask -- that is the fast
// case here.
12959 if (NumV2Elements == 1 && Mask[0] >= 4)
12960 if (SDValue V = lowerShuffleAsElementInsertion(
12961 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
12964 if (Subtarget.hasSSE41()) {
12965 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
12966 Zeroable, Subtarget, DAG))
12969 // Use INSERTPS if we can complete the shuffle efficiently.
12970 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
12973 if (!isSingleSHUFPSMask(Mask))
12974 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
12979 // Use low/high mov instructions. These are only valid in SSE1 because
12980 // otherwise they are widened to v2f64 and never get here.
12981 if (!Subtarget.hasSSE2()) {
12982 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
12983 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
12984 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
12985 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
12988 // Use dedicated unpack instructions for masks that match their pattern.
12989 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
12992 // Otherwise fall back to a SHUFPS lowering strategy.
12993 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
12996 /// Lower 4-lane i32 vector shuffles.
12998 /// We try to handle these with integer-domain shuffles where we can, but for
12999 /// blends we use the floating point domain blend instructions.
13000 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13001 const APInt &Zeroable, SDValue V1, SDValue V2,
13002 const X86Subtarget &Subtarget,
13003 SelectionDAG &DAG) {
13004 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13005 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13006 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13008 // Whenever we can lower this as a zext, that instruction is strictly faster
13009 // than any alternative. It also allows us to fold memory operands into the
13010 // shuffle in many cases.
13011 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13012 Zeroable, Subtarget, DAG))
13015 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13017 if (NumV2Elements == 0) {
13018 // Check for being able to broadcast a single element.
13019 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13020 Mask, Subtarget, DAG))
13023 // Straight shuffle of a single input vector. For everything from SSE2
13024 // onward this has a single fast instruction with no scary immediates.
13025 // We coerce the shuffle pattern to be compatible with UNPCK instructions
13026 // but we aren't actually going to use the UNPCK instruction because doing
13027 // so prevents folding a load into this instruction or making a copy.
13028 const int UnpackLoMask[] = {0, 0, 1, 1};
13029 const int UnpackHiMask[] = {2, 2, 3, 3};
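// Illustrative note (added): the {0, 0, 1, 1} form becomes PSHUFD with
// immediate 0x50, which duplicates the two low dwords exactly like UNPCKLDQ
// of the input with itself while keeping the operand foldable from memory.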
13030 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
13031 Mask = UnpackLoMask;
13032 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
13033 Mask = UnpackHiMask;
13035 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13036 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13039 if (Subtarget.hasAVX2())
13040 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13043 // Try to use shift instructions.
13044 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
13045 Zeroable, Subtarget, DAG))
13048 // There are special ways we can lower some single-element blends.
13049 if (NumV2Elements == 1)
13050 if (SDValue V = lowerShuffleAsElementInsertion(
13051 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13054 // We have different paths for blend lowering, but they all must use the
13055 // *exact* same predicate.
13056 bool IsBlendSupported = Subtarget.hasSSE41();
13057 if (IsBlendSupported)
13058 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13059 Zeroable, Subtarget, DAG))
13062 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13063 Zeroable, Subtarget, DAG))
13066 // Use dedicated unpack instructions for masks that match their pattern.
13067 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13070 // Try to use byte rotation instructions.
13071 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13072 if (Subtarget.hasSSSE3()) {
13073 if (Subtarget.hasVLX())
13074 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
13078 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13083 // Assume that a single SHUFPS is faster than an alternative sequence of
13084 // multiple instructions (even if the CPU has a domain penalty).
13085 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13086 if (!isSingleSHUFPSMask(Mask)) {
13087 // If we have direct support for blends, we should lower by decomposing into
13088 // a permute. That will be faster than the domain cross.
13089 if (IsBlendSupported)
13090 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
13093 // Try to lower by permuting the inputs into an unpack instruction.
13094 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13095 Mask, Subtarget, DAG))
13099 // We implement this with SHUFPS because it can blend from two vectors.
13100 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13101 // up the inputs, bypassing domain shift penalties that we would incur if we
13102 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
// needed.
13104 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13105 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13106 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13107 return DAG.getBitcast(MVT::v4i32, ShufPS);
13110 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13111 /// shuffle lowering, and the most complex part.
13113 /// The lowering strategy is to try to form pairs of input lanes which are
13114 /// targeted at the same half of the final vector, and then use a dword shuffle
13115 /// to place them onto the right half, and finally unpack the paired lanes into
13116 /// their final position.
13118 /// The exact breakdown of how to form these dword pairs and align them on the
13119 /// correct sides is really tricky. See the comments within the function for
13120 /// more of the details.
13122 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13123 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13124 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13125 /// vector, form the analogous 128-bit 8-element Mask.
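///
/// A small illustrative example (added): the v8i16 mask <0,2,1,3,4,6,5,7>
/// keeps every input word in its own half, so it lowers to PSHUFLW followed
/// by PSHUFHW (both with immediate 0xD8) and needs no dword shuffle; masks
/// that cross the half boundary first pair words into dwords and use PSHUFD
/// to move those dwords into the right halves before the final
/// PSHUFLW/PSHUFHW.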
13126 static SDValue lowerV8I16GeneralSingleInputShuffle(
13127 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13128 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13129 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13130 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13132 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13133 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13134 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13136 // Attempt to directly match PSHUFLW or PSHUFHW.
13137 if (isUndefOrInRange(LoMask, 0, 4) &&
13138 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13139 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13140 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13142 if (isUndefOrInRange(HiMask, 4, 8) &&
13143 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13144 for (int i = 0; i != 4; ++i)
13145 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13146 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13147 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13150 SmallVector<int, 4> LoInputs;
13151 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13152 array_pod_sort(LoInputs.begin(), LoInputs.end());
13153 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13154 SmallVector<int, 4> HiInputs;
13155 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13156 array_pod_sort(HiInputs.begin(), HiInputs.end());
13157 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13158 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13159 int NumHToL = LoInputs.size() - NumLToL;
13160 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13161 int NumHToH = HiInputs.size() - NumLToH;
13162 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13163 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13164 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13165 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13167 // If we are shuffling values from one half - check how many different DWORD
13168 // pairs we need to create. If only 1 or 2 then we can perform this as a
13169 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13170 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13171 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13172 V = DAG.getNode(ShufWOp, DL, VT, V,
13173 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13174 V = DAG.getBitcast(PSHUFDVT, V);
13175 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13176 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13177 return DAG.getBitcast(VT, V);
13180 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13181 int PSHUFDMask[4] = { -1, -1, -1, -1 };
13182 SmallVector<std::pair<int, int>, 4> DWordPairs;
13183 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13185 // Collect the different DWORD pairs.
13186 for (int DWord = 0; DWord != 4; ++DWord) {
13187 int M0 = Mask[2 * DWord + 0];
13188 int M1 = Mask[2 * DWord + 1];
13189 M0 = (M0 >= 0 ? M0 % 4 : M0);
13190 M1 = (M1 >= 0 ? M1 % 4 : M1);
13191 if (M0 < 0 && M1 < 0)
continue;
13194 bool Match = false;
13195 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13196 auto &DWordPair = DWordPairs[j];
13197 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13198 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13199 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13200 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13201 PSHUFDMask[DWord] = DOffset + j;
13207 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13208 DWordPairs.push_back(std::make_pair(M0, M1));
13212 if (DWordPairs.size() <= 2) {
13213 DWordPairs.resize(2, std::make_pair(-1, -1));
13214 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13215 DWordPairs[1].first, DWordPairs[1].second};
13216 if ((NumHToL + NumHToH) == 0)
13217 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13218 if ((NumLToL + NumLToH) == 0)
13219 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13223 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13224 // such inputs we can swap two of the dwords across the half mark and end up
13225 // with <=2 inputs to each half in each half. Once there, we can fall through
13226 // to the generic code below. For example:
13228 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13229 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13231 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13232 // and an existing 2-into-2 on the other half. In this case we may have to
13233 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13234 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13235 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13236 // because any other situation (including a 3-into-1 or 1-into-3 in the other
13237 // half than the one we target for fixing) will be fixed when we re-enter this
13238 // path. We will also combine away any sequence of PSHUFD instructions that
13239 // result into a single instruction. Here is an example of the tricky case:
13241 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13242 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13244 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13246 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13247 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13249 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13250 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13252 // The result is fine to be handled by the generic logic.
13253 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13254 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13255 int AOffset, int BOffset) {
13256 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13257 "Must call this with A having 3 or 1 inputs from the A half.");
13258 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13259 "Must call this with B having 1 or 3 inputs from the B half.");
13260 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13261 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13263 bool ThreeAInputs = AToAInputs.size() == 3;
13265 // Compute the index of dword with only one word among the three inputs in
13266 // a half by taking the sum of the half with three inputs and subtracting
13267 // the sum of the actual three inputs. The difference is the remaining
// slot.
13269 int ADWord = 0, BDWord = 0;
13270 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13271 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13272 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13273 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13274 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13275 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13276 int TripleNonInputIdx =
13277 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13278 TripleDWord = TripleNonInputIdx / 2;
13280 // We use xor with one to compute the adjacent DWord to whichever one the
// OneInput is in.
13282 OneInputDWord = (OneInput / 2) ^ 1;
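// Illustrative example (added): if the single input lives in word 5, it sits
// in dword 2, and xor with 1 selects dword 3 - the adjacent dword in the same
// (high) half.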
13284 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13285 // and BToA inputs. If there is also such a problem with the BToB and AToB
13286 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13287 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13288 // is essential that we don't *create* a 3<-1 as then we might oscillate.
13289 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13290 // Compute how many inputs will be flipped by swapping these DWords. We need
13292 // to balance this to ensure we don't form a 3-1 shuffle in the other
// half.
13294 int NumFlippedAToBInputs =
13295 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13296 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13297 int NumFlippedBToBInputs =
13298 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13299 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13300 if ((NumFlippedAToBInputs == 1 &&
13301 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13302 (NumFlippedBToBInputs == 1 &&
13303 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13304 // We choose whether to fix the A half or B half based on whether that
13305 // half has zero flipped inputs. At zero, we may not be able to fix it
13306 // with that half. We also bias towards fixing the B half because that
13307 // will more commonly be the high half, and we have to bias one way.
13308 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13309 ArrayRef<int> Inputs) {
13310 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13311 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13312 // Determine whether the free index is in the flipped dword or the
13313 // unflipped dword based on where the pinned index is. We use this bit
13314 // in an xor to conditionally select the adjacent dword.
13315 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13316 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13317 if (IsFixIdxInput == IsFixFreeIdxInput)
FixFreeIdx += 1;
13319 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13320 assert(IsFixIdxInput != IsFixFreeIdxInput &&
13321 "We need to be changing the number of flipped inputs!");
13322 int PSHUFHalfMask[] = {0, 1, 2, 3};
13323 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
V = DAG.getNode(
13325 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13326 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13327 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13329 for (int &M : Mask)
13330 if (M >= 0 && M == FixIdx)
M = FixFreeIdx;
13332 else if (M >= 0 && M == FixFreeIdx)
M = FixIdx;
};
13335 if (NumFlippedBToBInputs != 0) {
int BPinnedIdx =
13337 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13338 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
} else {
13340 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13341 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13342 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13347 int PSHUFDMask[] = {0, 1, 2, 3};
13348 PSHUFDMask[ADWord] = BDWord;
13349 PSHUFDMask[BDWord] = ADWord;
13350 V = DAG.getBitcast(
VT,
13352 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13353 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13355 // Adjust the mask to match the new locations of A and B.
13356 for (int &M : Mask)
13357 if (M >= 0 && M/2 == ADWord)
13358 M = 2 * BDWord + M % 2;
13359 else if (M >= 0 && M/2 == BDWord)
13360 M = 2 * ADWord + M % 2;
13362 // Recurse back into this routine to re-compute state now that this isn't
13363 // a 3 and 1 problem.
13364 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13366 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13367 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13368 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13369 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13371 // At this point there are at most two inputs to the low and high halves from
13372 // each half. That means the inputs can always be grouped into dwords and
13373 // those dwords can then be moved to the correct half with a dword shuffle.
13374 // We use at most one low and one high word shuffle to collect these paired
13375 // inputs into dwords, and finally a dword shuffle to place them.
13376 int PSHUFLMask[4] = {-1, -1, -1, -1};
13377 int PSHUFHMask[4] = {-1, -1, -1, -1};
13378 int PSHUFDMask[4] = {-1, -1, -1, -1};
13380 // First fix the masks for all the inputs that are staying in their
13381 // original halves. This will then dictate the targets of the cross-half
// shuffles.
13383 auto fixInPlaceInputs =
13384 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13385 MutableArrayRef<int> SourceHalfMask,
13386 MutableArrayRef<int> HalfMask, int HalfOffset) {
13387 if (InPlaceInputs.empty())
13389 if (InPlaceInputs.size() == 1) {
13390 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13391 InPlaceInputs[0] - HalfOffset;
13392 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13395 if (IncomingInputs.empty()) {
13396 // Just fix all of the in place inputs.
13397 for (int Input : InPlaceInputs) {
13398 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13399 PSHUFDMask[Input / 2] = Input / 2;
13404 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13405 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13406 InPlaceInputs[0] - HalfOffset;
13407 // Put the second input next to the first so that they are packed into
13408 // a dword. We find the adjacent index by toggling the low bit.
13409 int AdjIndex = InPlaceInputs[0] ^ 1;
13410 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13411 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13412 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13414 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13415 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13417 // Now gather the cross-half inputs and place them into a free dword of
13418 // their target half.
13419 // FIXME: This operation could almost certainly be simplified dramatically to
13420 // look more like the 3-1 fixing operation.
13421 auto moveInputsToRightHalf = [&PSHUFDMask](
13422 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13423 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13424 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13426 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13427 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13429 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13431 int LowWord = Word & ~1;
13432 int HighWord = Word | 1;
13433 return isWordClobbered(SourceHalfMask, LowWord) ||
13434 isWordClobbered(SourceHalfMask, HighWord);
13437 if (IncomingInputs.empty())
13440 if (ExistingInputs.empty()) {
13441 // Map any dwords with inputs from them into the right half.
13442 for (int Input : IncomingInputs) {
13443 // If the source half mask maps over the inputs, turn those into
13444 // swaps and use the swapped lane.
13445 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13446 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13447 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13448 Input - SourceOffset;
13449 // We have to swap the uses in our half mask in one sweep.
13450 for (int &M : HalfMask)
13451 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13453 else if (M == Input)
13454 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13456 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13457 Input - SourceOffset &&
13458 "Previous placement doesn't match!");
13460 // Note that this correctly re-maps both when we do a swap and when
13461 // we observe the other side of the swap above. We rely on that to
13462 // avoid swapping the members of the input list directly.
13463 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13466 // Map the input's dword into the correct half.
13467 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13468 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13470 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13472 "Previous placement doesn't match!");
13475 // And just directly shift any other-half mask elements to be same-half
13476 // as we will have mirrored the dword containing the element into the
13477 // same position within that half.
13478 for (int &M : HalfMask)
13479 if (M >= SourceOffset && M < SourceOffset + 4) {
13480 M = M - SourceOffset + DestOffset;
13481 assert(M >= 0 && "This should never wrap below zero!");
13486 // Ensure we have the input in a viable dword of its current half. This
13487 // is particularly tricky because the original position may be clobbered
13488 // by inputs being moved and *staying* in that half.
13489 if (IncomingInputs.size() == 1) {
13490 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13491 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
SourceOffset;
13493 SourceHalfMask[InputFixed - SourceOffset] =
13494 IncomingInputs[0] - SourceOffset;
13495 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
InputFixed);
13497 IncomingInputs[0] = InputFixed;
13499 } else if (IncomingInputs.size() == 2) {
13500 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13501 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13502 // We have two non-adjacent or clobbered inputs we need to extract from
13503 // the source half. To do this, we need to map them into some adjacent
13504 // dword slot in the source mask.
13505 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13506 IncomingInputs[1] - SourceOffset};
13508 // If there is a free slot in the source half mask adjacent to one of
13509 // the inputs, place the other input in it. We use (Index XOR 1) to
13510 // compute an adjacent index.
13511 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13512 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13513 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13514 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13515 InputsFixed[1] = InputsFixed[0] ^ 1;
13516 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13517 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13518 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13519 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13520 InputsFixed[0] = InputsFixed[1] ^ 1;
13521 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13522 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13523 // The two inputs are in the same DWord but it is clobbered and the
13524 // adjacent DWord isn't used at all. Move both inputs to the free
13526 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13527 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13528 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13529 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
} else {
13531 // The only way we hit this point is if there is no clobbering
13532 // (because there are no off-half inputs to this half) and there is no
13533 // free slot adjacent to one of the inputs. In this case, we have to
13534 // swap an input with a non-input.
13535 for (int i = 0; i < 4; ++i)
13536 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13537 "We can't handle any clobbers here!");
13538 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13539 "Cannot have adjacent inputs here!");
13541 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13542 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13544 // We also have to update the final source mask in this case because
13545 // it may need to undo the above swap.
13546 for (int &M : FinalSourceHalfMask)
13547 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13548 M = InputsFixed[1] + SourceOffset;
13549 else if (M == InputsFixed[1] + SourceOffset)
13550 M = (InputsFixed[0] ^ 1) + SourceOffset;
13552 InputsFixed[1] = InputsFixed[0] ^ 1;
13555 // Point everything at the fixed inputs.
13556 for (int &M : HalfMask)
13557 if (M == IncomingInputs[0])
13558 M = InputsFixed[0] + SourceOffset;
13559 else if (M == IncomingInputs[1])
13560 M = InputsFixed[1] + SourceOffset;
13562 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13563 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
}
} else {
13566 llvm_unreachable("Unhandled input size!");
}
13569 // Now hoist the DWord down to the right half.
13570 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13571 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13572 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13573 for (int &M : HalfMask)
13574 for (int Input : IncomingInputs)
if (M == Input)
13576 M = FreeDWord * 2 + Input % 2;
};
13578 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13579 /*SourceOffset*/ 4, /*DestOffset*/ 0);
13580 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13581 /*SourceOffset*/ 0, /*DestOffset*/ 4);
13583 // Now enact all the shuffles we've computed to move the inputs into their
// target halves.
13585 if (!isNoopShuffleMask(PSHUFLMask))
13586 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13587 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13588 if (!isNoopShuffleMask(PSHUFHMask))
13589 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13590 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13591 if (!isNoopShuffleMask(PSHUFDMask))
13592 V = DAG.getBitcast(
VT,
13594 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13595 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13597 // At this point, each half should contain all its inputs, and we can then
13598 // just shuffle them into their final position.
13599 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13600 "Failed to lift all the high half inputs to the low mask!");
13601 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13602 "Failed to lift all the low half inputs to the high mask!");
13604 // Do a half shuffle for the low mask.
13605 if (!isNoopShuffleMask(LoMask))
13606 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13607 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13609 // Do a half shuffle with the high mask after shifting its values down.
13610 for (int &M : HiMask)
if (M >= 0)
M -= 4;
13613 if (!isNoopShuffleMask(HiMask))
13614 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13615 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));

return V;
}
13620 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13621 /// blend if only one input is used.
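///
/// Background note (illustrative, added): each PSHUFB control byte selects a
/// source byte for the corresponding destination byte, and any control byte
/// with bit 7 set (the 0x80 ZeroMask below) zeroes its lane. So a destination
/// byte that should come from byte j of V2 gets 0x80 in V1's control vector
/// and j in V2's, and the two PSHUFB results are simply ORed together.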
13622 static SDValue lowerShuffleAsBlendOfPSHUFBs(
13623 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13624 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13625 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13626 "Lane crossing shuffle masks not supported");
13628 int NumBytes = VT.getSizeInBits() / 8;
13629 int Size = Mask.size();
13630 int Scale = NumBytes / Size;
13632 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13633 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13637 for (int i = 0; i < NumBytes; ++i) {
13638 int M = Mask[i / Scale];
if (M < 0)
continue;
13642 const int ZeroMask = 0x80;
13643 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13644 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13645 if (Zeroable[i / Scale])
13646 V1Idx = V2Idx = ZeroMask;
13648 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13649 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13650 V1InUse |= (ZeroMask != V1Idx);
13651 V2InUse |= (ZeroMask != V2Idx);
13654 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
if (V1InUse)
13656 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13657 DAG.getBuildVector(ShufVT, DL, V1Mask));
if (V2InUse)
13659 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13660 DAG.getBuildVector(ShufVT, DL, V2Mask));
13662 // If we need shuffled inputs from both, blend the two.
SDValue V;
13664 if (V1InUse && V2InUse)
13665 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
else
13667 V = V1InUse ? V1 : V2;
13669 // Cast the result back to the correct type.
13670 return DAG.getBitcast(VT, V);
13673 /// Generic lowering of 8-lane i16 shuffles.
13675 /// This handles both single-input shuffles and combined shuffle/blends with
13676 /// two inputs. The single input shuffles are immediately delegated to
13677 /// a dedicated lowering routine.
13679 /// The blends are lowered in one of three fundamental ways. If there are few
13680 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
13681 /// of the input is significantly cheaper when lowered as an interleaving of
13682 /// the two inputs, try to interleave them. Otherwise, blend the low and high
13683 /// halves of the inputs separately (making them have relatively few inputs)
13684 /// and then concatenate them.
13685 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13686 const APInt &Zeroable, SDValue V1, SDValue V2,
13687 const X86Subtarget &Subtarget,
13688 SelectionDAG &DAG) {
13689 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13690 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13691 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13693 // Whenever we can lower this as a zext, that instruction is strictly faster
13694 // than any alternative.
13695 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13696 Zeroable, Subtarget, DAG))
13699 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13701 if (NumV2Inputs == 0) {
13702 // Check for being able to broadcast a single element.
13703 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13704 Mask, Subtarget, DAG))
13707 // Try to use shift instructions.
13708 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
13709 Zeroable, Subtarget, DAG))
13712 // Use dedicated unpack instructions for masks that match their pattern.
13713 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13716 // Use dedicated pack instructions for masks that match their pattern.
13717 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13721 // Try to use byte rotation instructions.
13722 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13726 // Make a copy of the mask so it can be modified.
13727 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
13728 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
13732 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
13733 "All single-input shuffles should be canonicalized to be V1-input "
13736 // Try to use shift instructions.
13737 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
13738 Zeroable, Subtarget, DAG))
13741 // See if we can use SSE4A Extraction / Insertion.
13742 if (Subtarget.hasSSE4A())
13743 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
13747 // There are special ways we can lower some single-element blends.
13748 if (NumV2Inputs == 1)
13749 if (SDValue V = lowerShuffleAsElementInsertion(
13750 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13753 // We have different paths for blend lowering, but they all must use the
13754 // *exact* same predicate.
13755 bool IsBlendSupported = Subtarget.hasSSE41();
13756 if (IsBlendSupported)
13757 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
13758 Zeroable, Subtarget, DAG))
13761 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
13762 Zeroable, Subtarget, DAG))
13765 // Use dedicated unpack instructions for masks that match their pattern.
13766 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13769 // Use dedicated pack instructions for masks that match their pattern.
13770 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13774 // Try to use byte rotation instructions.
13775 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
13779 if (SDValue BitBlend =
13780 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
13783 // Try to use byte shift instructions to mask.
13784 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
13785 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13788 // Try to lower by permuting the inputs into an unpack instruction.
13789 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
13790 Mask, Subtarget, DAG))
13793 // If we can't directly blend but can use PSHUFB, that will be better as it
13794 // can both shuffle and set up the inefficient blend.
13795 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
13796 bool V1InUse, V2InUse;
13797 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
13798 Zeroable, DAG, V1InUse, V2InUse);
13801 // We can always bit-blend if we have to so the fallback strategy is to
13802 // decompose into single-input permutes and blends.
13803 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
13804 Mask, Subtarget, DAG);
13807 /// Check whether a compaction lowering can be done by dropping even
13808 /// elements and compute how many times even elements must be dropped.
13810 /// This handles shuffles which take every Nth element where N is a power of
13811 /// two. Example shuffle masks:
13813 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
13814 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
13815 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
13816 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
13817 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
13818 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
13820 /// Any of these lanes can of course be undef.
13822 /// This routine only supports N <= 3.
13823 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for improvement.
13826 /// \returns N above, or the number of times even elements must be dropped if
13827 /// there is such a number. Otherwise returns zero.
13828 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
13829 bool IsSingleInput) {
13830 // The modulus for the shuffle vector entries is based on whether this is
13831 // a single input or not.
13832 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
13833 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
13834 "We should only be called with masks with a power-of-2 size!");
13836 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
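// Illustrative check (added): for a single-input v16i8 mask (modulus 16) and
// N == 1, element i == 3 must equal (3 << 1) & 15 == 6, matching the
// "0, 2, 4, 6, ..." pattern documented above.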
13838 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
13839 // and 2^3 simultaneously. This is because we may have ambiguity with
13840 // partially undef inputs.
13841 bool ViableForN[3] = {true, true, true};
13843 for (int i = 0, e = Mask.size(); i < e; ++i) {
13844 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
// find.
if (Mask[i] < 0)
continue;
13849 bool IsAnyViable = false;
13850 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
13851 if (ViableForN[j]) {
13852 uint64_t N = j + 1;
13854 // The shuffle mask must be equal to (i * 2^N) % M.
13855 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
13856 IsAnyViable = true;
13858 ViableForN[j] = false;
13860 // Early exit if we exhaust the possible powers of two.
13865 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
13869 // Return 0 as there is no viable power of two.
13873 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
13874 ArrayRef<int> Mask, SDValue V1,
13875 SDValue V2, SelectionDAG &DAG) {
13876 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
13877 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
13879 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
13881 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
13883 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
13886 /// Generic lowering of v16i8 shuffles.
13888 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
13889 /// detect any complexity reducing interleaving. If that doesn't help, it uses
13890 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
13891 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
13893 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13894 const APInt &Zeroable, SDValue V1, SDValue V2,
13895 const X86Subtarget &Subtarget,
13896 SelectionDAG &DAG) {
13897 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
13898 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
13899 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
13901 // Try to use shift instructions.
13902 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
13903 Zeroable, Subtarget, DAG))
13906 // Try to use byte rotation instructions.
13907 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
13911 // Use dedicated pack instructions for masks that match their pattern.
13912 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
13916 // Try to use a zext lowering.
13917 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
13918 Zeroable, Subtarget, DAG))
13921 // See if we can use SSE4A Extraction / Insertion.
13922 if (Subtarget.hasSSE4A())
13923 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
13927 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
13929 // For single-input shuffles, there are some nicer lowering tricks we can use.
13930 if (NumV2Elements == 0) {
13931 // Check for being able to broadcast a single element.
13932 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
13933 Mask, Subtarget, DAG))
13936 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
13939 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
13940 // Notably, this handles splat and partial-splat shuffles more efficiently.
13941 // However, it only makes sense if the pre-duplication shuffle simplifies
13942 // things significantly. Currently, this means we need to be able to
13943 // express the pre-duplication shuffle as an i16 shuffle.
13945 // FIXME: We should check for other patterns which can be widened into an
13946 // i16 shuffle as well.
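// For example, a full byte-splat mask such as <5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5>
// qualifies because every pair of adjacent lanes requests the same byte.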
13947 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
13948 for (int i = 0; i < 16; i += 2)
13949 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
13954 auto tryToWidenViaDuplication = [&]() -> SDValue {
13955 if (!canWidenViaDuplication(Mask))
13957 SmallVector<int, 4> LoInputs;
13958 copy_if(Mask, std::back_inserter(LoInputs),
13959 [](int M) { return M >= 0 && M < 8; });
13960 array_pod_sort(LoInputs.begin(), LoInputs.end());
13961 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
13963 SmallVector<int, 4> HiInputs;
13964 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
13965 array_pod_sort(HiInputs.begin(), HiInputs.end());
13966 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
13969 bool TargetLo = LoInputs.size() >= HiInputs.size();
13970 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
13971 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
13973 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
13974 SmallDenseMap<int, int, 8> LaneMap;
13975 for (int I : InPlaceInputs) {
13976 PreDupI16Shuffle[I/2] = I/2;
13979 int j = TargetLo ? 0 : 4, je = j + 4;
13980 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
13981 // Check if j is already a shuffle of this input. This happens when
13982 // there are two adjacent bytes after we move the low one.
13983 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
13984 // If we haven't yet mapped the input, search for a slot into which
13986 while (j < je && PreDupI16Shuffle[j] >= 0)
13990 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
13993 // Map this input with the i16 shuffle.
13994 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
13997 // Update the lane map based on the mapping we ended up with.
13998 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14000 V1 = DAG.getBitcast(
14002 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14003 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14005 // Unpack the bytes to form the i16s that will be shuffled into place.
14006 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14007 MVT::v16i8, V1, V1);
14009 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14010 for (int i = 0; i < 16; ++i)
14011 if (Mask[i] >= 0) {
14012 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14013 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14014 if (PostDupI16Shuffle[i / 2] < 0)
14015 PostDupI16Shuffle[i / 2] = MappedMask;
14017 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14018 "Conflicting entries in the original shuffle!");
14020 return DAG.getBitcast(
14022 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14023 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14025 if (SDValue V = tryToWidenViaDuplication())
14029 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14030 Zeroable, Subtarget, DAG))
14033 // Use dedicated unpack instructions for masks that match their pattern.
14034 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14037 // Try to use byte shift instructions to mask.
14038 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14039 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14042 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14043 // with PSHUFB. It is important to do this before we attempt to generate any
14044 // blends but after all of the single-input lowerings. If the single input
14045 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14046 // want to preserve that and we can DAG combine any longer sequences into
14047 // a PSHUFB in the end. But once we start blending from multiple inputs,
14048 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14049 // and there are *very* few patterns that would actually be faster than the
14050 // PSHUFB approach because of its ability to zero lanes.
14052 // FIXME: The only exceptions to the above are blends which are exact
14053 // interleavings with direct instructions supporting them. We currently don't
14054 // handle those well here.
14055 if (Subtarget.hasSSSE3()) {
14056 bool V1InUse = false;
14057 bool V2InUse = false;
14059 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14060 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14062 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14063 // do so. This avoids using them to handle blends-with-zero which is
14064 // important as a single pshufb is significantly faster for that.
14065 if (V1InUse && V2InUse) {
14066 if (Subtarget.hasSSE41())
14067 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14068 Zeroable, Subtarget, DAG))
14071 // We can use an unpack to do the blending rather than an or in some
14072 // cases. Even though the or may be (very minorly) more efficient, we
14073 // prefer this lowering because there are common cases where part of
14074 // the complexity of the shuffles goes away when we do the final blend as
14076 // FIXME: It might be worth trying to detect if the unpack-feeding
14077 // shuffles will both be pshufb, in which case we shouldn't bother with
14079 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14080 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14083 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14084 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14085 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14087 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14088 // PALIGNR will be cheaper than the second PSHUFB+OR.
14089 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14090 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14097 // There are special ways we can lower some single-element blends.
14098 if (NumV2Elements == 1)
14099 if (SDValue V = lowerShuffleAsElementInsertion(
14100 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14103 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14106 // Check whether a compaction lowering can be done. This handles shuffles
14107 // which take every Nth element for some even N. See the helper function for
14110 // We special case these as they can be particularly efficiently handled with
14111 // the PACKUSWB instruction on x86 and they show up in common patterns of
14112 // rearranging bytes to truncate wide elements.
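// For example, a mask that keeps every fourth byte (NumEvenDrops == 2 below)
// is handled by ANDing each input with a v4i32 splat of 0xFF to clear the
// bytes being dropped and then applying PACKUS twice to compact the rest.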
14113 bool IsSingleInput = V2.isUndef();
14114 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
14115 // NumEvenDrops is the power of two stride of the elements. Another way of
14116 // thinking about it is that we need to drop the even elements this many
14117 // times to get the original input.
14119 // First we need to zero all the dropped bytes.
14120 assert(NumEvenDrops <= 3 &&
14121 "No support for dropping even elements more than 3 times.");
14122 // We use the mask type to pick which bytes are preserved based on how many
14123 // elements are dropped.
14124 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
14125 SDValue ByteClearMask = DAG.getBitcast(
14126 MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
14127 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14128 if (!IsSingleInput)
14129 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14131 // Now pack things back together.
14132 V1 = DAG.getBitcast(MVT::v8i16, V1);
14133 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14134 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14135 for (int i = 1; i < NumEvenDrops; ++i) {
14136 Result = DAG.getBitcast(MVT::v8i16, Result);
14137 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14143 // Handle multi-input cases by blending single-input shuffles.
14144 if (NumV2Elements > 0)
14145 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14148 // The fallback path for single-input shuffles widens this into two v8i16
14149 // vectors with unpacks, shuffles those, and then pulls them back together
14153 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14154 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14155 for (int i = 0; i < 16; ++i)
14157 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14159 SDValue VLoHalf, VHiHalf;
14160 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14161 // them out and avoid using UNPCK{L,H} to extract the elements of V as
14163 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14164 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14165 // Use a mask to drop the high bytes.
14166 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14167 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14168 DAG.getConstant(0x00FF, DL, MVT::v8i16));
14170 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14171 VHiHalf = DAG.getUNDEF(MVT::v8i16);
14173 // Squash the masks to point directly into VLoHalf.
14174 for (int &M : LoBlendMask)
14177 for (int &M : HiBlendMask)
14181 // Otherwise just unpack the low half of V into VLoHalf and the high half into
14182 // VHiHalf so that we can blend them as i16s.
14183 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14185 VLoHalf = DAG.getBitcast(
14186 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14187 VHiHalf = DAG.getBitcast(
14188 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14191 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14192 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14194 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14197 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14199 /// This routine breaks down the specific type of 128-bit shuffle and
14200 /// dispatches to the lowering routines accordingly.
14201 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14202 MVT VT, SDValue V1, SDValue V2,
14203 const APInt &Zeroable,
14204 const X86Subtarget &Subtarget,
14205 SelectionDAG &DAG) {
14206 switch (VT.SimpleTy) {
14208 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14210 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14212 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14214 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14216 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14218 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14221 llvm_unreachable("Unimplemented!");
14225 /// Generic routine to split vector shuffle into half-sized shuffles.
14227 /// This routine just extracts two subvectors, shuffles them independently, and
14228 /// then concatenates them back together. This should work effectively with all
14229 /// AVX vector shuffle types.
14230 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14231 SDValue V2, ArrayRef<int> Mask,
14232 SelectionDAG &DAG) {
14233 assert(VT.getSizeInBits() >= 256 &&
14234 "Only for 256-bit or wider vector shuffles!");
14235 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14236 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14238 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14239 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14241 int NumElements = VT.getVectorNumElements();
14242 int SplitNumElements = NumElements / 2;
14243 MVT ScalarVT = VT.getVectorElementType();
14244 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
14246 // Rather than splitting build-vectors, just build two narrower build
14247 // vectors. This helps shuffling with splats and zeros.
14248 auto SplitVector = [&](SDValue V) {
14249 V = peekThroughBitcasts(V);
14251 MVT OrigVT = V.getSimpleValueType();
14252 int OrigNumElements = OrigVT.getVectorNumElements();
14253 int OrigSplitNumElements = OrigNumElements / 2;
14254 MVT OrigScalarVT = OrigVT.getVectorElementType();
14255 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14259 auto *BV = dyn_cast<BuildVectorSDNode>(V);
14261 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14262 DAG.getIntPtrConstant(0, DL));
14263 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14264 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
14267 SmallVector<SDValue, 16> LoOps, HiOps;
14268 for (int i = 0; i < OrigSplitNumElements; ++i) {
14269 LoOps.push_back(BV->getOperand(i));
14270 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14272 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14273 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14275 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14276 DAG.getBitcast(SplitVT, HiV));
14279 SDValue LoV1, HiV1, LoV2, HiV2;
14280 std::tie(LoV1, HiV1) = SplitVector(V1);
14281 std::tie(LoV2, HiV2) = SplitVector(V2);
14283 // Now create two 4-way blends of these half-width vectors.
14284 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14285 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14286 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14287 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14288 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14289 for (int i = 0; i < SplitNumElements; ++i) {
14290 int M = HalfMask[i];
14291 if (M >= NumElements) {
14292 if (M >= NumElements + SplitNumElements)
14296 V2BlendMask[i] = M - NumElements;
14297 BlendMask[i] = SplitNumElements + i;
14298 } else if (M >= 0) {
14299 if (M >= SplitNumElements)
14303 V1BlendMask[i] = M;
14308 // Because the lowering happens after all combining takes place, we need to
14309 // manually combine these blend masks as much as possible so that we create
14310 // a minimal number of high-level vector shuffle nodes.
14312 // First try just blending the halves of V1 or V2.
14313 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14314 return DAG.getUNDEF(SplitVT);
14315 if (!UseLoV2 && !UseHiV2)
14316 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14317 if (!UseLoV1 && !UseHiV1)
14318 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14320 SDValue V1Blend, V2Blend;
14321 if (UseLoV1 && UseHiV1) {
14323 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14325 // We only use half of V1 so map the usage down into the final blend mask.
14326 V1Blend = UseLoV1 ? LoV1 : HiV1;
14327 for (int i = 0; i < SplitNumElements; ++i)
14328 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14329 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14331 if (UseLoV2 && UseHiV2) {
14333 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14335 // We only use half of V2 so map the usage down into the final blend mask.
14336 V2Blend = UseLoV2 ? LoV2 : HiV2;
14337 for (int i = 0; i < SplitNumElements; ++i)
14338 if (BlendMask[i] >= SplitNumElements)
14339 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14341 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14343 SDValue Lo = HalfBlend(LoMask);
14344 SDValue Hi = HalfBlend(HiMask);
14345 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14348 /// Either split a vector in halves or decompose the shuffles and the
14351 /// This is provided as a good fallback for many lowerings of non-single-input
14352 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14353 /// between splitting the shuffle into 128-bit components and stitching those
14354 /// back together vs. extracting the single-input shuffles and blending those
14356 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14357 SDValue V2, ArrayRef<int> Mask,
14358 const X86Subtarget &Subtarget,
14359 SelectionDAG &DAG) {
14360 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14361 "shuffles as it could then recurse on itself.");
14362 int Size = Mask.size();
14364 // If this can be modeled as a broadcast of two elements followed by a blend,
14365 // prefer that lowering. This is especially important because broadcasts can
14366 // often fold with memory operands.
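// For example, a v4f64 mask <2, 6, 2, 6> broadcasts element 2 of each input
// and then blends the two broadcasts together.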
14367 auto DoBothBroadcast = [&] {
14368 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14371 if (V2BroadcastIdx < 0)
14372 V2BroadcastIdx = M - Size;
14373 else if (M - Size != V2BroadcastIdx)
14375 } else if (M >= 0) {
14376 if (V1BroadcastIdx < 0)
14377 V1BroadcastIdx = M;
14378 else if (M != V1BroadcastIdx)
14383 if (DoBothBroadcast())
14384 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14387 // If the inputs all stem from a single 128-bit lane of each input, then we
14388 // split them rather than blending because the split will decompose to
14389 // unusually few instructions.
14390 int LaneCount = VT.getSizeInBits() / 128;
14391 int LaneSize = Size / LaneCount;
14392 SmallBitVector LaneInputs[2];
14393 LaneInputs[0].resize(LaneCount, false);
14394 LaneInputs[1].resize(LaneCount, false);
14395 for (int i = 0; i < Size; ++i)
14397 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14398 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14399 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14401 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14402 // that the decomposed single-input shuffles don't end up here.
14403 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14407 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14408 /// a lane permutation followed by a per-lane permutation.
14410 /// This is mainly for cases where we can have non-repeating permutes
14413 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14414 /// we should investigate merging them.
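/// For example, the v8f32 mask <7,6,5,4,3,2,1,0> becomes a lane permutation
/// <4,5,6,7,0,1,2,3> followed by the in-lane permutation <3,2,1,0,7,6,5,4>.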
14415 static SDValue lowerShuffleAsLanePermuteAndPermute(
14416 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14417 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14418 int NumElts = VT.getVectorNumElements();
14419 int NumLanes = VT.getSizeInBits() / 128;
14420 int NumEltsPerLane = NumElts / NumLanes;
14422 SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14423 SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14425 for (int i = 0; i != NumElts; ++i) {
14430 // Ensure that each lane comes from a single source lane.
14431 int SrcLane = M / NumEltsPerLane;
14432 int DstLane = i / NumEltsPerLane;
14433 if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14435 SrcLaneMask[DstLane] = SrcLane;
14437 PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14440 // Make sure we set all elements of the lane mask, to avoid undef propagation.
14441 SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14442 for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14443 int SrcLane = SrcLaneMask[DstLane];
14445 for (int j = 0; j != NumEltsPerLane; ++j) {
14446 LaneMask[(DstLane * NumEltsPerLane) + j] =
14447 (SrcLane * NumEltsPerLane) + j;
14451 // If we're only shuffling a single lowest lane and the rest are identity
14452 // then don't bother.
14453 // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14454 int NumIdentityLanes = 0;
14455 bool OnlyShuffleLowestLane = true;
14456 for (int i = 0; i != NumLanes; ++i) {
14457 if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14458 i * NumEltsPerLane))
14459 NumIdentityLanes++;
14460 else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14461 OnlyShuffleLowestLane = false;
14463 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14466 SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14467 return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14470 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14471 /// a permutation and blend of those lanes.
14473 /// This essentially blends the out-of-lane inputs to each lane into the lane
14474 /// from a permuted copy of the vector. This lowering strategy results in four
14475 /// instructions in the worst case for a single-input cross lane shuffle which
14476 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
14477 /// of. Special cases for each particular shuffle pattern should be handled
14478 /// prior to trying this lowering.
14479 static SDValue lowerShuffleAsLanePermuteAndBlend(
14480 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14481 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14482 // FIXME: This should probably be generalized for 512-bit vectors as well.
14483 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14484 int Size = Mask.size();
14485 int LaneSize = Size / 2;
14487 // If there are only inputs from one 128-bit lane, splitting will in fact be
14488 // less expensive. The flags track whether the given lane contains an element
14489 // that crosses to another lane.
14490 if (!Subtarget.hasAVX2()) {
14491 bool LaneCrossing[2] = {false, false};
14492 for (int i = 0; i < Size; ++i)
14493 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
14494 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14495 if (!LaneCrossing[0] || !LaneCrossing[1])
14496 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14498 bool LaneUsed[2] = {false, false};
14499 for (int i = 0; i < Size; ++i)
14501 LaneUsed[(Mask[i] / LaneSize)] = true;
14502 if (!LaneUsed[0] || !LaneUsed[1])
14503 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14506 assert(V2.isUndef() &&
14507 "This last part of this routine only works on single input shuffles");
14509 SmallVector<int, 32> FlippedBlendMask(Size);
14510 for (int i = 0; i < Size; ++i)
14511 FlippedBlendMask[i] =
14512 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
14514 : Mask[i] % LaneSize +
14515 (i / LaneSize) * LaneSize + Size);
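// For example, the single-input v4f64 mask <2,1,0,3> yields FlippedBlendMask
// <4,1,6,3>: the in-lane elements come from V1 and the cross-lane elements
// come from the lane-flipped copy built below.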
14517 // Flip the vector, and blend the results which should now be in-lane.
14518 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14519 SDValue Flipped = DAG.getBitcast(PVT, V1);
14520 Flipped = DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT),
14522 Flipped = DAG.getBitcast(VT, Flipped);
14523 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
14526 /// Handle lowering 2-lane 128-bit shuffles.
14527 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14528 SDValue V2, ArrayRef<int> Mask,
14529 const APInt &Zeroable,
14530 const X86Subtarget &Subtarget,
14531 SelectionDAG &DAG) {
14532 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14533 if (Subtarget.hasAVX2() && V2.isUndef())
14536 SmallVector<int, 4> WidenedMask;
14537 if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
14540 bool IsLowZero = (Zeroable & 0x3) == 0x3;
14541 bool IsHighZero = (Zeroable & 0xc) == 0xc;
14543 // Try to use an insert into a zero vector.
14544 if (WidenedMask[0] == 0 && IsHighZero) {
14545 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14546 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14547 DAG.getIntPtrConstant(0, DL));
14548 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14549 getZeroVector(VT, Subtarget, DAG, DL), LoV,
14550 DAG.getIntPtrConstant(0, DL));
14553 // TODO: If minimizing size and one of the inputs is a zero vector and the
14554 // zero vector has only one use, we could use a VPERM2X128 to save the
14555 // instruction bytes needed to explicitly generate the zero vector.
14557 // Blends are faster and handle all the non-lane-crossing cases.
14558 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14562 // If either input operand is a zero vector, use VPERM2X128 because its mask
14563 // allows us to replace the zero input with an implicit zero.
14564 if (!IsLowZero && !IsHighZero) {
14565 // Check for patterns which can be matched with a single insert of a 128-bit
14567 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
14568 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
14570 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14571 // this will likely become vinsertf128 which can't fold a 256-bit memop.
14572 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14573 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14574 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14575 OnlyUsesV1 ? V1 : V2,
14576 DAG.getIntPtrConstant(0, DL));
14577 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14578 DAG.getIntPtrConstant(2, DL));
14582 // Try to use SHUF128 if possible.
14583 if (Subtarget.hasVLX()) {
14584 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14585 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14586 ((WidenedMask[1] % 2) << 1);
14587 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14588 DAG.getConstant(PermMask, DL, MVT::i8));
14593 // Otherwise form a 128-bit permutation. After accounting for undefs,
14594 // convert the 64-bit shuffle mask selection values into 128-bit
14595 // selection bits by dividing the indexes by 2 and shifting into positions
14596 // defined by a vperm2*128 instruction's immediate control byte.
14598 // The immediate permute control byte looks like this:
14599 // [1:0] - select 128 bits from sources for low half of destination
14601 // [3] - zero low half of destination
14602 // [5:4] - select 128 bits from sources for high half of destination
14604 // [7] - zero high half of destination
14606 assert((WidenedMask[0] >= 0 || IsLowZero) &&
14607 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14609 unsigned PermMask = 0;
14610 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
14611 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
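// For example, WidenedMask <1, 2> selects the high half of V1 for the low
// destination half and the low half of V2 for the high destination half,
// giving PermMask == 0x21.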
14613 // Check the immediate mask and replace unused sources with undef.
14614 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14615 V1 = DAG.getUNDEF(VT);
14616 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14617 V2 = DAG.getUNDEF(VT);
14619 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14620 DAG.getConstant(PermMask, DL, MVT::i8));
14623 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
14624 /// shuffling each lane.
14626 /// This attempts to create a repeated lane shuffle where each lane uses one
14627 /// or two of the lanes of the inputs. The lanes of the input vectors are
14628 /// shuffled in one or two independent shuffles to get the lanes into the
14629 /// position needed by the final shuffle.
14630 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14631 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14632 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14633 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14635 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14638 int Size = Mask.size();
14639 int NumLanes = VT.getSizeInBits() / 128;
14640 int LaneSize = 128 / VT.getScalarSizeInBits();
14641 SmallVector<int, 16> RepeatMask(LaneSize, -1);
14642 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14644 // First pass will try to fill in the RepeatMask from lanes that need two
14646 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14647 int Srcs[2] = { -1, -1 };
14648 SmallVector<int, 16> InLaneMask(LaneSize, -1);
14649 for (int i = 0; i != LaneSize; ++i) {
14650 int M = Mask[(Lane * LaneSize) + i];
14653 // Determine which of the possible input lanes (NumLanes from each source)
14654 // this element comes from. Assign that as one of the sources for this
14655 // lane. We can assign up to 2 sources for this lane. If we run out
14656 // of sources we can't do anything.
14657 int LaneSrc = M / LaneSize;
14659 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14661 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14666 Srcs[Src] = LaneSrc;
14667 InLaneMask[i] = (M % LaneSize) + Src * Size;
14670 // If this lane has two sources, see if it fits with the repeat mask so far.
14674 LaneSrcs[Lane][0] = Srcs[0];
14675 LaneSrcs[Lane][1] = Srcs[1];
14677 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14678 assert(M1.size() == M2.size() && "Unexpected mask size");
14679 for (int i = 0, e = M1.size(); i != e; ++i)
14680 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14685 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14686 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14687 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14691 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14692 "Unexpected mask element");
14697 if (MatchMasks(InLaneMask, RepeatMask)) {
14698 // Merge this lane mask into the final repeat mask.
14699 MergeMasks(InLaneMask, RepeatMask);
14703 // Didn't find a match. Swap the operands and try again.
14704 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
14705 ShuffleVectorSDNode::commuteMask(InLaneMask);
14707 if (MatchMasks(InLaneMask, RepeatMask)) {
14708 // Merge this lane mask into the final repeat mask.
14709 MergeMasks(InLaneMask, RepeatMask);
14713 // Couldn't find a match with the operands in either order.
14717 // Now handle any lanes with only one source.
14718 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14719 // If this lane has already been processed, skip it.
14720 if (LaneSrcs[Lane][0] >= 0)
14723 for (int i = 0; i != LaneSize; ++i) {
14724 int M = Mask[(Lane * LaneSize) + i];
14728 // If RepeatMask isn't defined yet we can define it ourself.
14729 if (RepeatMask[i] < 0)
14730 RepeatMask[i] = M % LaneSize;
14732 if (RepeatMask[i] < Size) {
14733 if (RepeatMask[i] != M % LaneSize)
14735 LaneSrcs[Lane][0] = M / LaneSize;
14737 if (RepeatMask[i] != ((M % LaneSize) + Size))
14739 LaneSrcs[Lane][1] = M / LaneSize;
14743 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
14747 SmallVector<int, 16> NewMask(Size, -1);
14748 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14749 int Src = LaneSrcs[Lane][0];
14750 for (int i = 0; i != LaneSize; ++i) {
14753 M = Src * LaneSize + i;
14754 NewMask[Lane * LaneSize + i] = M;
14757 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
14758 // Ensure we didn't get back the shuffle we started with.
14759 // FIXME: This is a hack to make up for some splat handling code in
14760 // getVectorShuffle.
14761 if (isa<ShuffleVectorSDNode>(NewV1) &&
14762 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
14765 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14766 int Src = LaneSrcs[Lane][1];
14767 for (int i = 0; i != LaneSize; ++i) {
14770 M = Src * LaneSize + i;
14771 NewMask[Lane * LaneSize + i] = M;
14774 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
14775 // Ensure we didn't get back the shuffle we started with.
14776 // FIXME: This is a hack to make up for some splat handling code in
14777 // getVectorShuffle.
14778 if (isa<ShuffleVectorSDNode>(NewV2) &&
14779 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
14782 for (int i = 0; i != Size; ++i) {
14783 NewMask[i] = RepeatMask[i % LaneSize];
14784 if (NewMask[i] < 0)
14787 NewMask[i] += (i / LaneSize) * LaneSize;
14789 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
14792 /// If the input shuffle mask results in a vector that is undefined in all upper
14793 /// or lower half elements and that mask accesses only 2 halves of the
14794 /// shuffle's operands, return true. A mask of half the width with mask indexes
14795 /// adjusted to access the extracted halves of the original shuffle operands is
14796 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
14797 /// lower half of each input operand is accessed.
14799 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
14800 int &HalfIdx1, int &HalfIdx2) {
14801 assert((Mask.size() == HalfMask.size() * 2) &&
14802 "Expected input mask to be twice as long as output");
14804 // Exactly one half of the result must be undef to allow narrowing.
14805 bool UndefLower = isUndefLowerHalf(Mask);
14806 bool UndefUpper = isUndefUpperHalf(Mask);
14807 if (UndefLower == UndefUpper)
14810 unsigned HalfNumElts = HalfMask.size();
14811 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
14814 for (unsigned i = 0; i != HalfNumElts; ++i) {
14815 int M = Mask[i + MaskIndexOffset];
14821 // Determine which of the 4 half vectors this element is from.
14822 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
14823 int HalfIdx = M / HalfNumElts;
14825 // Determine the element index into its half vector source.
14826 int HalfElt = M % HalfNumElts;
14828 // We can shuffle with up to 2 half vectors, set the new 'half'
14829 // shuffle mask accordingly.
14830 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
14831 HalfMask[i] = HalfElt;
14832 HalfIdx1 = HalfIdx;
14835 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
14836 HalfMask[i] = HalfElt + HalfNumElts;
14837 HalfIdx2 = HalfIdx;
14841 // Too many half vectors referenced.
14848 /// Given the output values from getHalfShuffleMask(), create a half width
14849 /// shuffle of extracted vectors followed by an insert back to full width.
14850 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
14851 ArrayRef<int> HalfMask, int HalfIdx1,
14852 int HalfIdx2, bool UndefLower,
14853 SelectionDAG &DAG) {
14854 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
14855 assert(V1.getValueType().isSimple() && "Expecting only simple types");
14857 MVT VT = V1.getSimpleValueType();
14858 unsigned NumElts = VT.getVectorNumElements();
14859 unsigned HalfNumElts = NumElts / 2;
14860 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
14862 auto getHalfVector = [&](int HalfIdx) {
14864 return DAG.getUNDEF(HalfVT);
14865 SDValue V = (HalfIdx < 2 ? V1 : V2);
14866 HalfIdx = (HalfIdx % 2) * HalfNumElts;
14867 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
14868 DAG.getIntPtrConstant(HalfIdx, DL));
14871 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
14872 SDValue Half1 = getHalfVector(HalfIdx1);
14873 SDValue Half2 = getHalfVector(HalfIdx2);
14874 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
14875 unsigned Offset = UndefLower ? HalfNumElts : 0;
14876 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
14877 DAG.getIntPtrConstant(Offset, DL));
14880 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
14881 /// This allows for fast cases such as subvector extraction/insertion
14882 /// or shuffling smaller vector types which can lower more efficiently.
14883 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
14884 SDValue V2, ArrayRef<int> Mask,
14885 const X86Subtarget &Subtarget,
14886 SelectionDAG &DAG) {
14887 assert((VT.is256BitVector() || VT.is512BitVector()) &&
14888 "Expected 256-bit or 512-bit vector");
14890 bool UndefLower = isUndefLowerHalf(Mask);
14891 if (!UndefLower && !isUndefUpperHalf(Mask))
14894 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
14895 "Completely undef shuffle mask should have been simplified already");
14897 // Upper half is undef and lower half is whole upper subvector.
14898 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
14899 unsigned NumElts = VT.getVectorNumElements();
14900 unsigned HalfNumElts = NumElts / 2;
14901 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
14903 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
14904 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
14905 DAG.getIntPtrConstant(HalfNumElts, DL));
14906 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
14907 DAG.getIntPtrConstant(0, DL));
14910 // Lower half is undef and upper half is whole lower subvector.
14911 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
14913 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
14914 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
14915 DAG.getIntPtrConstant(0, DL));
14916 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
14917 DAG.getIntPtrConstant(HalfNumElts, DL));
14920 int HalfIdx1, HalfIdx2;
14921 SmallVector<int, 8> HalfMask(HalfNumElts);
14922 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
14925 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
14927 // Only shuffle the halves of the inputs when useful.
14928 unsigned NumLowerHalves =
14929 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
14930 unsigned NumUpperHalves =
14931 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
14932 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
14934 // Determine the larger pattern of undef/halves, then decide if it's worth
14935 // splitting the shuffle based on subtarget capabilities and types.
14936 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
14938 // XXXXuuuu: no insert is needed.
14939 // Always extract lowers when setting lower - these are all free subreg ops.
14940 if (NumUpperHalves == 0)
14941 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
14944 if (NumUpperHalves == 1) {
14945 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
14946 if (Subtarget.hasAVX2()) {
14947 // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
14948 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
14949 !is128BitUnpackShuffleMask(HalfMask) &&
14950 (!isSingleSHUFPSMask(HalfMask) ||
14951 Subtarget.hasFastVariableShuffle()))
14953 // If this is a unary shuffle (assume that the 2nd operand is
14954 // canonicalized to undef), then we can use vpermpd. Otherwise, we
14955 // are better off extracting the upper half of 1 operand and using a
14957 if (EltWidth == 64 && V2.isUndef())
14960 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
14961 if (Subtarget.hasAVX512() && VT.is512BitVector())
14963 // Extract + narrow shuffle is better than the wide alternative.
14964 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
14968 // Don't extract both uppers; instead, shuffle and then extract.
14969 assert(NumUpperHalves == 2 && "Half vector count went wrong");
14973 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
14974 if (NumUpperHalves == 0) {
14975 // AVX2 has efficient 64-bit element cross-lane shuffles.
14976 // TODO: Refine to account for unary shuffle, splat, and other masks?
14977 if (Subtarget.hasAVX2() && EltWidth == 64)
14979 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
14980 if (Subtarget.hasAVX512() && VT.is512BitVector())
14982 // Narrow shuffle + insert is better than the wide alternative.
14983 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
14987 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
14991 /// Test whether the specified input (0 or 1) is in-place blended by the
14994 /// This returns true if the elements from a particular input are already in the
14995 /// slot required by the given mask and require no permutation.
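/// For example, input 0 is already in place for the v4i64 mask <0, 5, 2, 7>,
/// but not for <1, 5, 2, 7>.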
14996 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
14997 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
14998 int Size = Mask.size();
14999 for (int i = 0; i < Size; ++i)
15000 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
15006 /// Handle case where shuffle sources are coming from the same 128-bit lane and
15007 /// every lane can be represented as the same repeating mask - allowing us to
15008 /// shuffle the sources with the repeating shuffle and then permute the result
15009 /// to the destination lanes.
15010 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15011 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15012 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15013 int NumElts = VT.getVectorNumElements();
15014 int NumLanes = VT.getSizeInBits() / 128;
15015 int NumLaneElts = NumElts / NumLanes;
15017 // On AVX2 we may be able to just shuffle the lowest elements and then
15018 // broadcast the result.
15019 if (Subtarget.hasAVX2()) {
15020 for (unsigned BroadcastSize : {16, 32, 64}) {
15021 if (BroadcastSize <= VT.getScalarSizeInBits())
15023 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15025 // Attempt to match a repeating pattern every NumBroadcastElts,
15026 // accounting for UNDEFs but only referencing the lowest 128-bit
15027 // lane of the inputs.
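// For example, a v8i32 mask <1,0,1,0,1,0,1,0> yields RepeatMask <1,0,u,...>:
// the two low elements are swapped in place and the resulting 64 bits are
// then broadcast to every element pair.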
15028 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15029 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15030 for (int j = 0; j != NumBroadcastElts; ++j) {
15031 int M = Mask[i + j];
15034 int &R = RepeatMask[j];
15035 if (0 != ((M % NumElts) / NumLaneElts))
15037 if (0 <= R && R != M)
15044 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15045 if (!FindRepeatingBroadcastMask(RepeatMask))
15048 // Shuffle the (lowest) repeated elements in place for broadcast.
15049 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15051 // Shuffle the actual broadcast.
15052 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15053 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15054 for (int j = 0; j != NumBroadcastElts; ++j)
15055 BroadcastMask[i + j] = j;
15056 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15061 // Bail if the shuffle mask doesn't cross 128-bit lanes.
15062 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15065 // Bail if we already have a repeated lane shuffle mask.
15066 SmallVector<int, 8> RepeatedShuffleMask;
15067 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15070 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15071 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15072 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15073 int NumSubLanes = NumLanes * SubLaneScale;
15074 int NumSubLaneElts = NumLaneElts / SubLaneScale;
15076 // Check that all the sources are coming from the same lane and see if we can
15077 // form a repeating shuffle mask (local to each sub-lane). At the same time,
15078 // determine the source sub-lane for each destination sub-lane.
15079 int TopSrcSubLane = -1;
15080 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15081 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15082 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15083 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
15085 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15086 // Extract the sub-lane mask, check that it all comes from the same lane
15087 // and normalize the mask entries to come from the first lane.
15089 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15090 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15091 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15094 int Lane = (M % NumElts) / NumLaneElts;
15095 if ((0 <= SrcLane) && (SrcLane != Lane))
15098 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15099 SubLaneMask[Elt] = LocalM;
15102 // Whole sub-lane is UNDEF.
15106 // Attempt to match against the candidate repeated sub-lane masks.
15107 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15108 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15109 for (int i = 0; i != NumSubLaneElts; ++i) {
15110 if (M1[i] < 0 || M2[i] < 0)
15112 if (M1[i] != M2[i])
15118 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15119 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15122 // Merge the sub-lane mask into the matching repeated sub-lane mask.
15123 for (int i = 0; i != NumSubLaneElts; ++i) {
15124 int M = SubLaneMask[i];
15127 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15128 "Unexpected mask element");
15129 RepeatedSubLaneMask[i] = M;
15132 // Track the top most source sub-lane - by setting the remaining to UNDEF
15133 // we can greatly simplify shuffle matching.
15134 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15135 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15136 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15140 // Bail if we failed to find a matching repeated sub-lane mask.
15141 if (Dst2SrcSubLanes[DstSubLane] < 0)
15144 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15145 "Unexpected source lane");
15147 // Create a repeating shuffle mask for the entire vector.
15148 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15149 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15150 int Lane = SubLane / SubLaneScale;
15151 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15152 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15153 int M = RepeatedSubLaneMask[Elt];
15156 int Idx = (SubLane * NumSubLaneElts) + Elt;
15157 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15160 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15162 // Shuffle each source sub-lane to its destination.
15163 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15164 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15165 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15166 if (SrcSubLane < 0)
15168 for (int j = 0; j != NumSubLaneElts; ++j)
15169 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15172 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15176 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15177 unsigned &ShuffleImm, ArrayRef<int> Mask) {
15178 int NumElts = VT.getVectorNumElements();
15179 assert(VT.getScalarSizeInBits() == 64 &&
15180 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15181 "Unexpected data type for VSHUFPD");
15183 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
15184 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7..
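// For example, the v4f64 mask <0, 5, 2, 7> matches the first pattern and
// produces ShuffleImm == 0b1010.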
15186 bool ShufpdMask = true;
15187 bool CommutableMask = true;
15188 for (int i = 0; i < NumElts; ++i) {
15189 if (Mask[i] == SM_SentinelUndef)
15193 int Val = (i & 6) + NumElts * (i & 1);
15194 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15195 if (Mask[i] < Val || Mask[i] > Val + 1)
15196 ShufpdMask = false;
15197 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15198 CommutableMask = false;
15199 ShuffleImm |= (Mask[i] % 2) << i;
15204 if (CommutableMask) {
15212 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
15213 ArrayRef<int> Mask, SDValue V1,
15214 SDValue V2, SelectionDAG &DAG) {
15215 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64)&&
15216 "Unexpected data type for VSHUFPD");
15218 unsigned Immediate = 0;
15219 if (!matchShuffleWithSHUFPD(VT, V1, V2, Immediate, Mask))
15222 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15223 DAG.getConstant(Immediate, DL, MVT::i8));
15226 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15228 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15229 /// isn't available.
15230 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15231 const APInt &Zeroable, SDValue V1, SDValue V2,
15232 const X86Subtarget &Subtarget,
15233 SelectionDAG &DAG) {
15234 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15235 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15236 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15238 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15242 if (V2.isUndef()) {
15243 // Check for being able to broadcast a single element.
15244 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15245 Mask, Subtarget, DAG))
15248 // Use low duplicate instructions for masks that match their pattern.
15249 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15250 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15252 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15253 // Non-half-crossing single input shuffles can be lowered with an
15254 // interleaved permutation.
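// For example, the mask <1, 0, 3, 2> produces VPERMILPMask == 0b0101,
// swapping the two elements within each 128-bit lane.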
15255 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15256 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15257 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15258 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
15261 // With AVX2 we have direct support for this permutation.
15262 if (Subtarget.hasAVX2())
15263 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15264 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15266 // Try to create an in-lane repeating shuffle mask and then shuffle the
15267 // results into the target lanes.
15268 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15269 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15272 // Try to permute the lanes and then use a per-lane permute.
15273 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15274 Mask, DAG, Subtarget))
15277 // Otherwise, fall back.
15278 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask, DAG,
15282 // Use dedicated unpack instructions for masks that match their pattern.
15283 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15286 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15287 Zeroable, Subtarget, DAG))
15290 // Check if the blend happens to exactly fit that of SHUFPD.
15291 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
15294 // If we have one input in place, then we can permute the other input and
15295 // blend the result.
15296 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15297 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15300 // Try to create an in-lane repeating shuffle mask and then shuffle the
15301 // results into the target lanes.
15302 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15303 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15306 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15307 // shuffle. However, if we have AVX2 and either input is already in place,
15308 // we will be able to shuffle the other input even across lanes in a single
15309 // instruction, so skip this pattern.
15310 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15311 isShuffleMaskInputInPlace(1, Mask))))
15312 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15313 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15316 // If we have VLX support, we can use VEXPAND.
15317 if (Subtarget.hasVLX())
15318 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15322 // If we have AVX2 then we always want to lower with a blend because with v4 we
15323 // can fully permute the elements.
15324 if (Subtarget.hasAVX2())
15325 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15328 // Otherwise fall back on generic lowering.
15329 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15333 /// Handle lowering of 4-lane 64-bit integer shuffles.
15335 /// This routine is only called when we have AVX2 and thus a reasonable
15336 /// instruction set for v4i64 shuffling.
15337 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15338 const APInt &Zeroable, SDValue V1, SDValue V2,
15339 const X86Subtarget &Subtarget,
15340 SelectionDAG &DAG) {
15341 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15342 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15343 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15344 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15346 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15350 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15351 Zeroable, Subtarget, DAG))
15354 // Check for being able to broadcast a single element.
15355 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15359 if (V2.isUndef()) {
15360 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
15361 // can use lower latency instructions that will operate on both lanes.
15362 SmallVector<int, 2> RepeatedMask;
15363 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15364 SmallVector<int, 4> PSHUFDMask;
15365 scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15366 return DAG.getBitcast(
15368 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15369 DAG.getBitcast(MVT::v8i32, V1),
15370 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15373 // AVX2 provides a direct instruction for permuting a single input across
15375 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15376 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15379 // Try to use shift instructions.
15380 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15381 Zeroable, Subtarget, DAG))
15384 // If we have VLX support, we can use VALIGN or VEXPAND.
15385 if (Subtarget.hasVLX()) {
15386 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15390 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15395 // Try to use PALIGNR.
15396 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15400 // Use dedicated unpack instructions for masks that match their pattern.
15401 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15404 // If we have one input in place, then we can permute the other input and
15405 // blend the result.
15406 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15407 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15410 // Try to create an in-lane repeating shuffle mask and then shuffle the
15411 // results into the target lanes.
15412 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15413 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15416 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15417 // shuffle. However, if we have AVX2 and either input is already in place,
15418 // we will be able to shuffle the other input even across lanes in a single
15419 // instruction, so skip this pattern.
15420 if (!isShuffleMaskInputInPlace(0, Mask) &&
15421 !isShuffleMaskInputInPlace(1, Mask))
15422 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15423 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15426 // Otherwise fall back on generic blend lowering.
15427 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15431 /// Handle lowering of 8-lane 32-bit floating point shuffles.
15433 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15434 /// isn't available.
15435 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15436 const APInt &Zeroable, SDValue V1, SDValue V2,
15437 const X86Subtarget &Subtarget,
15438 SelectionDAG &DAG) {
15439 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15440 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15441 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15443 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15444 Zeroable, Subtarget, DAG))
15447 // Check for being able to broadcast a single element.
15448 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15452 // If the shuffle mask is repeated in each 128-bit lane, we have many more
15453 // options to efficiently lower the shuffle.
15454 SmallVector<int, 4> RepeatedMask;
15455 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15456 assert(RepeatedMask.size() == 4 &&
15457 "Repeated masks must be half the mask width!");
15459 // Use even/odd duplicate instructions for masks that match their pattern.
15460 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
15461 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15462 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
15463 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15466 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15467 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15469 // Use dedicated unpack instructions for masks that match their pattern.
15470 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15473 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15474 // have already handled any direct blends.
15475 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15478 // Try to create an in-lane repeating shuffle mask and then shuffle the
15479 // results into the target lanes.
15480 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15481 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15484 // If we have a single input shuffle with different shuffle patterns in the
15485 // two 128-bit lanes, use a variable mask with VPERMILPS.
15486 if (V2.isUndef()) {
15487 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15488 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
15489 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15491 if (Subtarget.hasAVX2())
15492 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
15494 // Otherwise, fall back.
15495 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
15499 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15500 // shuffle.
15501 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15502 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15505 // If we have VLX support, we can use VEXPAND.
15506 if (Subtarget.hasVLX())
15507 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
15511 // For non-AVX512, if the mask matches a 16-bit-element in-lane unpack pattern,
15512 // try to split since after the split we get more efficient code using
15513 // vpunpcklwd and vpunpckhwd instructions than with vblend.
15514 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
15515 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15519 // If we have AVX2 then we always want to lower with a blend because at v8 we
15520 // can fully permute the elements.
15521 if (Subtarget.hasAVX2())
15522 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
15525 // Otherwise fall back on generic lowering.
15526 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15530 /// Handle lowering of 8-lane 32-bit integer shuffles.
15532 /// This routine is only called when we have AVX2 and thus a reasonable
15533 /// instruction set for v8i32 shuffling.
15534 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15535 const APInt &Zeroable, SDValue V1, SDValue V2,
15536 const X86Subtarget &Subtarget,
15537 SelectionDAG &DAG) {
15538 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15539 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15540 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15541 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
15543 // Whenever we can lower this as a zext, that instruction is strictly faster
15544 // than any alternative. It also allows us to fold memory operands into the
15545 // shuffle in many cases.
15546 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15547 Zeroable, Subtarget, DAG))
15550 // For non-AVX512, if the mask matches a 16-bit-element in-lane unpack pattern,
15551 // try to split since after the split we get more efficient code than vblend
15552 // by using vpunpcklwd and vpunpckhwd instructions.
15553 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
15554 !Subtarget.hasAVX512())
15555 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
15559 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
15560 Zeroable, Subtarget, DAG))
15563 // Check for being able to broadcast a single element.
15564 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
15568 // If the shuffle mask is repeated in each 128-bit lane, we can use more
15569 // efficient instructions that mirror the shuffles across the two 128-bit
15570 // lanes.
15571 SmallVector<int, 4> RepeatedMask;
15572 bool Is128BitLaneRepeatedShuffle =
15573 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
15574 if (Is128BitLaneRepeatedShuffle) {
15575 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
15577 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
15578 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15580 // Use dedicated unpack instructions for masks that match their pattern.
15581 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
15585 // Try to use shift instructions.
15586 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
15587 Zeroable, Subtarget, DAG))
15590 // If we have VLX support, we can use VALIGN or EXPAND.
15591 if (Subtarget.hasVLX()) {
15592 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
15596 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
15601 // Try to use byte rotation instructions.
15602 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
15606 // Try to create an in-lane repeating shuffle mask and then shuffle the
15607 // results into the target lanes.
15608 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15609 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15612 // If the shuffle patterns aren't repeated but it is a single input, directly
15613 // generate a cross-lane VPERMD instruction.
15614 if (V2.isUndef()) {
15615 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15616 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
15619 // Assume that a single SHUFPS is faster than an alternative sequence of
15620 // multiple instructions (even if the CPU has a domain penalty).
15621 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
15622 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
15623 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
15624 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
15625 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
15626 CastV1, CastV2, DAG);
15627 return DAG.getBitcast(MVT::v8i32, ShufPS);
15630 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15631 // shuffle.
15632 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15633 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15636 // Otherwise fall back on generic blend lowering.
15637 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
15641 /// Handle lowering of 16-lane 16-bit integer shuffles.
15643 /// This routine is only called when we have AVX2 and thus a reasonable
15644 /// instruction set for v16i16 shuffling.
15645 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15646 const APInt &Zeroable, SDValue V1, SDValue V2,
15647 const X86Subtarget &Subtarget,
15648 SelectionDAG &DAG) {
15649 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15650 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15651 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
15652 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
15654 // Whenever we can lower this as a zext, that instruction is strictly faster
15655 // than any alternative. It also allows us to fold memory operands into the
15656 // shuffle in many cases.
15657 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
15658 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
15661 // Check for being able to broadcast a single element.
15662 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
15666 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
15667 Zeroable, Subtarget, DAG))
15670 // Use dedicated unpack instructions for masks that match their pattern.
15671 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
15674 // Use dedicated pack instructions for masks that match their pattern.
15675 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
15679 // Try to use shift instructions.
15680 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
15681 Zeroable, Subtarget, DAG))
15684 // Try to use byte rotation instructions.
15685 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
15689 // Try to create an in-lane repeating shuffle mask and then shuffle the
15690 // results into the target lanes.
15691 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15692 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
15695 if (V2.isUndef()) {
15696 // There are no generalized cross-lane shuffle operations available on i16
15697 // element types.
15698 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
15699 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15700 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
15703 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2, Mask,
15707 SmallVector<int, 8> RepeatedMask;
15708 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
15709 // As this is a single-input shuffle, the repeated mask should be
15710 // a strictly valid v8i16 mask that we can pass through to the v8i16
15711 // lowering to handle even the v16 case.
15712 return lowerV8I16GeneralSingleInputShuffle(
15713 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
15717 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
15718 Zeroable, Subtarget, DAG))
15721 // AVX512BWVL can lower to VPERMW.
15722 if (Subtarget.hasBWI() && Subtarget.hasVLX())
15723 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
15725 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15726 // shuffle.
15727 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15728 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
15731 // Try to permute the lanes and then use a per-lane permute.
15732 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15733 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
15736 // Otherwise fall back on generic lowering.
15737 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
15741 /// Handle lowering of 32-lane 8-bit integer shuffles.
15743 /// This routine is only called when we have AVX2 and thus a reasonable
15744 /// instruction set for v32i8 shuffling.
15745 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15746 const APInt &Zeroable, SDValue V1, SDValue V2,
15747 const X86Subtarget &Subtarget,
15748 SelectionDAG &DAG) {
15749 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
15750 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
15751 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
15752 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
15754 // Whenever we can lower this as a zext, that instruction is strictly faster
15755 // than any alternative. It also allows us to fold memory operands into the
15756 // shuffle in many cases.
15757 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
15758 Zeroable, Subtarget, DAG))
15761 // Check for being able to broadcast a single element.
15762 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
15766 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
15767 Zeroable, Subtarget, DAG))
15770 // Use dedicated unpack instructions for masks that match their pattern.
15771 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
15774 // Use dedicated pack instructions for masks that match their pattern.
15775 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
15779 // Try to use shift instructions.
15780 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
15781 Zeroable, Subtarget, DAG))
15784 // Try to use byte rotation instructions.
15785 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
15789 // Try to create an in-lane repeating shuffle mask and then shuffle the
15790 // results into the target lanes.
15791 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15792 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
15795 // There are no generalized cross-lane shuffle operations available on i8
15796 // element types.
15797 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
15798 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15799 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
15802 return lowerShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask, DAG,
15806 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
15807 Zeroable, Subtarget, DAG))
15810 // AVX512VBMIVL can lower to VPERMB.
15811 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
15812 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
15814 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15815 // shuffle.
15816 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15817 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
15820 // Try to permute the lanes and then use a per-lane permute.
15821 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
15822 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
15825 // Otherwise fall back on generic lowering.
15826 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
15830 /// High-level routine to lower various 256-bit x86 vector shuffles.
15832 /// This routine either breaks down the specific type of a 256-bit x86 vector
15833 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
15834 /// together based on the available instructions.
15835 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
15836 SDValue V1, SDValue V2, const APInt &Zeroable,
15837 const X86Subtarget &Subtarget,
15838 SelectionDAG &DAG) {
15839 // If we have a single input to the zero element, insert that into V1 if we
15840 // can do so cheaply.
15841 int NumElts = VT.getVectorNumElements();
15842 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
15844 if (NumV2Elements == 1 && Mask[0] >= NumElts)
15845 if (SDValue Insertion = lowerShuffleAsElementInsertion(
15846 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
15849 // Handle special cases where the lower or upper half is UNDEF.
15851 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
15854 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
15855 // can check for those subtargets here and avoid much of the subtarget
15856 // querying in the per-vector-type lowering routines. With AVX1 we have
15857 // essentially *zero* ability to manipulate a 256-bit vector with integer
15858 // types. Since we'll use floating point types there eventually, just
15859 // immediately cast everything to a float and operate entirely in that domain.
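// For example, a v8i32 shuffle without AVX2 is bitcast to v8f32, shuffled in
// the floating point domain, and bitcast back to v8i32.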
15860 if (VT.isInteger() && !Subtarget.hasAVX2()) {
15861 int ElementBits = VT.getScalarSizeInBits();
15862 if (ElementBits < 32) {
15863 // No floating point type available, if we can't use the bit operations
15864 // for masking/blending then decompose into 128-bit vectors.
15865 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
15868 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
15870 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15873 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
15874 VT.getVectorNumElements());
15875 V1 = DAG.getBitcast(FpVT, V1);
15876 V2 = DAG.getBitcast(FpVT, V2);
15877 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
15880 switch (VT.SimpleTy) {
15882 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15884 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15886 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15888 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15890 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15892 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15895 llvm_unreachable("Not a valid 256-bit x86 vector type!");
15899 /// Try to lower a vector shuffle as 128-bit shuffles.
15900 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
15901 const APInt &Zeroable, SDValue V1, SDValue V2,
15902 const X86Subtarget &Subtarget,
15903 SelectionDAG &DAG) {
15904 assert(VT.getScalarSizeInBits() == 64 &&
15905 "Unexpected element type size for 128bit shuffle.");
15907 // Handling a 256-bit vector requires VLX, and most probably the function
15908 // lowerV2X128VectorShuffle() is a better solution.
15909 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
15911 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
15912 SmallVector<int, 4> WidenedMask;
15913 if (!canWidenShuffleElements(Mask, WidenedMask))
15916 // Try to use an insert into a zero vector.
15917 if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
15918 (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
15919 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
15920 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
15921 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
15922 DAG.getIntPtrConstant(0, DL));
15923 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
15924 getZeroVector(VT, Subtarget, DAG, DL), LoV,
15925 DAG.getIntPtrConstant(0, DL));
15928 // Check for patterns which can be matched with a single insert of a 256-bit
15929 // subvector.
15930 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
15931 {0, 1, 2, 3, 0, 1, 2, 3});
15932 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
15933 {0, 1, 2, 3, 8, 9, 10, 11})) {
15934 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
15935 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
15936 OnlyUsesV1 ? V1 : V2,
15937 DAG.getIntPtrConstant(0, DL));
15938 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
15939 DAG.getIntPtrConstant(4, DL));
15942 assert(WidenedMask.size() == 4);
15944 // See if this is an insertion of the lower 128-bits of V2 into V1.
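// For example, the widened mask <0,1,4,3> keeps V1's other 128-bit chunks in
// place and inserts the low 128 bits of V2 as chunk 2 of the result.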
15945 bool IsInsert = true;
15947 for (int i = 0; i < 4; ++i) {
15948 assert(WidenedMask[i] >= -1);
15949 if (WidenedMask[i] < 0)
15952 // Make sure all V1 subvectors are in place.
15953 if (WidenedMask[i] < 4) {
15954 if (WidenedMask[i] != i) {
15959 // Make sure we only have a single V2 index and it's the lowest 128 bits.
15960 if (V2Index >= 0 || WidenedMask[i] != 4) {
15967 if (IsInsert && V2Index >= 0) {
15968 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15969 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
15970 DAG.getIntPtrConstant(0, DL));
15971 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
15974 // Try to lower to vshuf64x2/vshuf32x4.
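// For example, the widened mask <0,2,5,7> takes chunks 0 and 2 of V1 for the
// low half and chunks 1 and 3 of V2 for the high half, giving a PermMask of
// 0xD8 (0b11011000).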
15975 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
15976 unsigned PermMask = 0;
15977 // Ensure elements came from the same Op.
15978 for (int i = 0; i < 4; ++i) {
15979 assert(WidenedMask[i] >= -1);
15980 if (WidenedMask[i] < 0)
15983 SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
15984 unsigned OpIndex = i / 2;
15985 if (Ops[OpIndex].isUndef())
15987 else if (Ops[OpIndex] != Op)
15990 // Convert the 128-bit shuffle mask selection values into 128-bit selection
15991 // bits defined by a vshuf64x2 instruction's immediate control byte.
15992 PermMask |= (WidenedMask[i] % 4) << (i * 2);
15995 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
15996 DAG.getConstant(PermMask, DL, MVT::i8));
15999 /// Handle lowering of 8-lane 64-bit floating point shuffles.
16000 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16001 const APInt &Zeroable, SDValue V1, SDValue V2,
16002 const X86Subtarget &Subtarget,
16003 SelectionDAG &DAG) {
16004 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16005 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16006 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16008 if (V2.isUndef()) {
16009 // Use low duplicate instructions for masks that match their pattern.
16010 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
16011 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16013 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16014 // Non-half-crossing single input shuffles can be lowered with an
16015 // interleaved permutation.
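// For example, the in-lane mask <1,0, 3,3, 4,5, 6,6> yields the VPERMILPD
// immediate 0b00101101 (0x2D).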
16016 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16017 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16018 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16019 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16020 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16021 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
16024 SmallVector<int, 4> RepeatedMask;
16025 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16026 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16027 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16030 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16031 V2, Subtarget, DAG))
16034 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16037 // Check if the blend happens to exactly fit that of SHUFPD.
16038 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
16041 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16045 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16046 Zeroable, Subtarget, DAG))
16049 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16052 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16053 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16054 const APInt &Zeroable, SDValue V1, SDValue V2,
16055 const X86Subtarget &Subtarget,
16056 SelectionDAG &DAG) {
16057 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16058 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16059 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16061 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16062 // options to efficiently lower the shuffle.
16063 SmallVector<int, 4> RepeatedMask;
16064 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16065 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16067 // Use even/odd duplicate instructions for masks that match their pattern.
16068 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16069 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16070 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16071 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16074 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16075 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16077 // Use dedicated unpack instructions for masks that match their pattern.
16078 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16081 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16082 Zeroable, Subtarget, DAG))
16085 // Otherwise, fall back to a SHUFPS sequence.
16086 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16089 // If we have a single input shuffle with different shuffle patterns in the
16090 // 128-bit lanes that doesn't cross lanes, use a variable mask with VPERMILPS.
16091 if (V2.isUndef() &&
16092 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16093 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16094 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16097 // If we have AVX512F support, we can use VEXPAND.
16098 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16099 V1, V2, DAG, Subtarget))
16102 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16105 /// Handle lowering of 8-lane 64-bit integer shuffles.
16106 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16107 const APInt &Zeroable, SDValue V1, SDValue V2,
16108 const X86Subtarget &Subtarget,
16109 SelectionDAG &DAG) {
16110 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16111 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16112 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16114 if (V2.isUndef()) {
16115 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
16116 // can use lower latency instructions that will operate on all four
16117 // 128-bit lanes.
16118 SmallVector<int, 2> Repeated128Mask;
16119 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16120 SmallVector<int, 4> PSHUFDMask;
16121 scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16122 return DAG.getBitcast(
16124 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16125 DAG.getBitcast(MVT::v16i32, V1),
16126 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16129 SmallVector<int, 4> Repeated256Mask;
16130 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16131 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16132 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16135 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16136 V2, Subtarget, DAG))
16139 // Try to use shift instructions.
16140 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16141 Zeroable, Subtarget, DAG))
16144 // Try to use VALIGN.
16145 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16149 // Try to use PALIGNR.
16150 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16154 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16156 // If we have AVX512F support, we can use VEXPAND.
16157 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16161 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16162 Zeroable, Subtarget, DAG))
16165 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16168 /// Handle lowering of 16-lane 32-bit integer shuffles.
16169 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16170 const APInt &Zeroable, SDValue V1, SDValue V2,
16171 const X86Subtarget &Subtarget,
16172 SelectionDAG &DAG) {
16173 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16174 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16175 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16177 // Whenever we can lower this as a zext, that instruction is strictly faster
16178 // than any alternative. It also allows us to fold memory operands into the
16179 // shuffle in many cases.
16180 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16181 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16184 // If the shuffle mask is repeated in each 128-bit lane, we can use more
16185 // efficient instructions that mirror the shuffles across the four 128-bit
16186 // lanes.
16187 SmallVector<int, 4> RepeatedMask;
16188 bool Is128BitLaneRepeatedShuffle =
16189 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16190 if (Is128BitLaneRepeatedShuffle) {
16191 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16193 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16194 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16196 // Use dedicated unpack instructions for masks that match their pattern.
16197 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16201 // Try to use shift instructions.
16202 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16203 Zeroable, Subtarget, DAG))
16206 // Try to use VALIGN.
16207 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
16211 // Try to use byte rotation instructions.
16212 if (Subtarget.hasBWI())
16213 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16217 // Assume that a single SHUFPS is faster than using a permv shuffle.
16218 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16219 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16220 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16221 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16222 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16223 CastV1, CastV2, DAG);
16224 return DAG.getBitcast(MVT::v16i32, ShufPS);
16226 // If we have AVX512F support, we can use VEXPAND.
16227 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16231 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16232 Zeroable, Subtarget, DAG))
16234 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16237 /// Handle lowering of 32-lane 16-bit integer shuffles.
16238 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16239 const APInt &Zeroable, SDValue V1, SDValue V2,
16240 const X86Subtarget &Subtarget,
16241 SelectionDAG &DAG) {
16242 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16243 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16244 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16245 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16247 // Whenever we can lower this as a zext, that instruction is strictly faster
16248 // than any alternative. It also allows us to fold memory operands into the
16249 // shuffle in many cases.
16250 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16251 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16254 // Use dedicated unpack instructions for masks that match their pattern.
16255 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16258 // Try to use shift instructions.
16259 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16260 Zeroable, Subtarget, DAG))
16263 // Try to use byte rotation instructions.
16264 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16268 if (V2.isUndef()) {
16269 SmallVector<int, 8> RepeatedMask;
16270 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16271 // As this is a single-input shuffle, the repeated mask should be
16272 // a strictly valid v8i16 mask that we can pass through to the v8i16
16273 // lowering to handle even the v32 case.
16274 return lowerV8I16GeneralSingleInputShuffle(
16275 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16279 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16280 Zeroable, Subtarget, DAG))
16283 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16284 Zeroable, Subtarget, DAG))
16287 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16290 /// Handle lowering of 64-lane 8-bit integer shuffles.
16291 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16292 const APInt &Zeroable, SDValue V1, SDValue V2,
16293 const X86Subtarget &Subtarget,
16294 SelectionDAG &DAG) {
16295 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16296 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16297 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16298 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16300 // Whenever we can lower this as a zext, that instruction is strictly faster
16301 // than any alternative. It also allows us to fold memory operands into the
16302 // shuffle in many cases.
16303 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16304 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16307 // Use dedicated unpack instructions for masks that match their pattern.
16308 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16311 // Use dedicated pack instructions for masks that match their pattern.
16312 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16316 // Try to use shift instructions.
16317 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16318 Zeroable, Subtarget, DAG))
16321 // Try to use byte rotation instructions.
16322 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16326 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16327 Zeroable, Subtarget, DAG))
16330 // VBMI can use VPERMV/VPERMV3 byte shuffles.
16331 if (Subtarget.hasVBMI())
16332 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16334 // Try to create an in-lane repeating shuffle mask and then shuffle the
16335 // results into the target lanes.
16336 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16337 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16340 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16341 Zeroable, Subtarget, DAG))
16344 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16345 // shuffle.
16347 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16348 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16351 // FIXME: Implement direct support for this type!
16352 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16355 /// High-level routine to lower various 512-bit x86 vector shuffles.
16357 /// This routine either breaks down the specific type of a 512-bit x86 vector
16358 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
16359 /// together based on the available instructions.
16360 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16361 MVT VT, SDValue V1, SDValue V2,
16362 const APInt &Zeroable,
16363 const X86Subtarget &Subtarget,
16364 SelectionDAG &DAG) {
16365 assert(Subtarget.hasAVX512() &&
16366 "Cannot lower 512-bit vectors w/ basic ISA!");
16368 // If we have a single input to the zero element, insert that into V1 if we
16369 // can do so cheaply.
16370 int NumElts = Mask.size();
16371 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16373 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16374 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16375 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16378 // Handle special cases where the lower or upper half is UNDEF.
16380 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16383 // Check for being able to broadcast a single element.
16384 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16388 // Dispatch to each element type for lowering. If we don't have support for
16389 // specific element type shuffles at 512 bits, immediately split them and
16390 // lower them. Each lowering routine of a given type is allowed to assume that
16391 // the requisite ISA extensions for that element type are available.
16392 switch (VT.SimpleTy) {
16394 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16396 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16398 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16400 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16402 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16404 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16407 llvm_unreachable("Not a valid 512-bit x86 vector type!");
16411 // Determine if this shuffle can be implemented with a KSHIFT instruction.
16412 // Returns the shift amount if possible or -1 if not. This is a simplified
16413 // version of matchShuffleAsShift.
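// For example, with MaskOffset == 0 the mask <2,3,4,5,6,7,z,z>, where z marks
// positions known to be zeroable, matches KSHIFTR with a shift of 2, while
// <z,z,0,1,2,3,4,5> matches KSHIFTL with a shift of 2.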
16414 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
16415 int MaskOffset, const APInt &Zeroable) {
16416 int Size = Mask.size();
16418 auto CheckZeros = [&](int Shift, bool Left) {
16419 for (int j = 0; j < Shift; ++j)
16420 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
16426 auto MatchShift = [&](int Shift, bool Left) {
16427 unsigned Pos = Left ? Shift : 0;
16428 unsigned Low = Left ? 0 : Shift;
16429 unsigned Len = Size - Shift;
16430 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
16433 for (int Shift = 1; Shift != Size; ++Shift)
16434 for (bool Left : {true, false})
16435 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
16436 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
16444 // Lower vXi1 vector shuffles.
16445 // There is no dedicated instruction on AVX-512 that shuffles the masks.
16446 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
16447 // vector, shuffle, and then truncate it back.
16448 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16449 MVT VT, SDValue V1, SDValue V2,
16450 const APInt &Zeroable,
16451 const X86Subtarget &Subtarget,
16452 SelectionDAG &DAG) {
16453 assert(Subtarget.hasAVX512() &&
16454 "Cannot lower 512-bit vectors w/o basic ISA!");
16456 unsigned NumElts = Mask.size();
16458 // Try to recognize shuffles that are just padding a subvector with zeros.
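// For example, the v8i1 mask <0,1,2,3,12,13,14,15> where V2 is all zeros is
// lowered as an insert of the low v4i1 subvector of V1 into a zero vector.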
16459 unsigned SubvecElts = 0;
16460 for (int i = 0; i != (int)NumElts; ++i) {
16461 if (Mask[i] >= 0 && Mask[i] != i)
16466 assert(SubvecElts != NumElts && "Identity shuffle?");
16468 // Clip to a power of 2.
16469 SubvecElts = PowerOf2Floor(SubvecElts);
16471 // Make sure the number of zeroable bits in the top at least covers the bits
16472 // not covered by the subvector.
16473 if (Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
16474 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
16475 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
16476 V1, DAG.getIntPtrConstant(0, DL));
16477 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16478 getZeroVector(VT, Subtarget, DAG, DL),
16479 Extract, DAG.getIntPtrConstant(0, DL));
16482 // Try to match KSHIFTs.
16483 // TODO: Support narrower than legal shifts by widening and extracting.
16484 if (NumElts >= 16 || (Subtarget.hasDQI() && NumElts == 8)) {
16485 unsigned Offset = 0;
16486 for (SDValue V : { V1, V2 }) {
16488 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
16490 return DAG.getNode(Opcode, DL, VT, V,
16491 DAG.getConstant(ShiftAmt, DL, MVT::i8));
16492 Offset += NumElts; // Increment for next iteration.
16498 switch (VT.SimpleTy) {
16500 llvm_unreachable("Expected a vector of i1 elements");
16502 ExtVT = MVT::v2i64;
16505 ExtVT = MVT::v4i32;
16508 // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
16509 // type.
16510 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
16513 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16514 // 256-bit operation available.
16515 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
16518 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16519 // 256-bit operation available.
16520 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
16521 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
16524 ExtVT = MVT::v64i8;
16528 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
16529 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
16531 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
16532 // i1 was sign-extended, so we can use X86ISD::CVT2MASK.
16533 int NumElems = VT.getVectorNumElements();
16534 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
16535 (Subtarget.hasDQI() && (NumElems < 32)))
16536 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
16537 Shuffle, ISD::SETGT);
16539 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
16542 /// Helper function that returns true if the shuffle mask should be
16543 /// commuted to improve canonicalization.
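/// For example, the v4 mask <4,5,0,6> takes three elements from V2 and only one
/// from V1, so the operands are swapped and the mask becomes <0,1,4,2>.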
16544 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
16545 int NumElements = Mask.size();
16547 int NumV1Elements = 0, NumV2Elements = 0;
16551 else if (M < NumElements)
16556 // Commute the shuffle as needed such that more elements come from V1 than
16557 // V2. This allows us to match the shuffle pattern strictly on how many
16558 // elements come from V1 without handling the symmetric cases.
16559 if (NumV2Elements > NumV1Elements)
16562 assert(NumV1Elements > 0 && "No V1 indices");
16564 if (NumV2Elements == 0)
16567 // When the number of V1 and V2 elements is the same, try to minimize the
16568 // number of uses of V2 in the low half of the vector. When that is tied,
16569 // ensure that the sum of indices for V1 is equal to or lower than the sum of
16570 // indices for V2. When those are equal, try to ensure that the number of odd
16571 // indices for V1 is lower than the number of odd indices for V2.
16572 if (NumV1Elements == NumV2Elements) {
16573 int LowV1Elements = 0, LowV2Elements = 0;
16574 for (int M : Mask.slice(0, NumElements / 2))
16575 if (M >= NumElements)
16579 if (LowV2Elements > LowV1Elements)
16581 if (LowV2Elements == LowV1Elements) {
16582 int SumV1Indices = 0, SumV2Indices = 0;
16583 for (int i = 0, Size = Mask.size(); i < Size; ++i)
16584 if (Mask[i] >= NumElements)
16586 else if (Mask[i] >= 0)
16588 if (SumV2Indices < SumV1Indices)
16590 if (SumV2Indices == SumV1Indices) {
16591 int NumV1OddIndices = 0, NumV2OddIndices = 0;
16592 for (int i = 0, Size = Mask.size(); i < Size; ++i)
16593 if (Mask[i] >= NumElements)
16594 NumV2OddIndices += i % 2;
16595 else if (Mask[i] >= 0)
16596 NumV1OddIndices += i % 2;
16597 if (NumV2OddIndices < NumV1OddIndices)
16606 /// Top-level lowering for x86 vector shuffles.
16608 /// This handles decomposition, canonicalization, and lowering of all x86
16609 /// vector shuffles. Most of the specific lowering strategies are encapsulated
16610 /// above in helper routines. The canonicalization attempts to widen shuffles
16611 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
16612 /// s.t. only one of the two inputs needs to be tested, etc.
16613 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
16614 SelectionDAG &DAG) {
16615 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
16616 ArrayRef<int> Mask = SVOp->getMask();
16617 SDValue V1 = Op.getOperand(0);
16618 SDValue V2 = Op.getOperand(1);
16619 MVT VT = Op.getSimpleValueType();
16620 int NumElements = VT.getVectorNumElements();
16622 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
16624 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
16625 "Can't lower MMX shuffles");
16627 bool V1IsUndef = V1.isUndef();
16628 bool V2IsUndef = V2.isUndef();
16629 if (V1IsUndef && V2IsUndef)
16630 return DAG.getUNDEF(VT);
16632 // When we create a shuffle node we put the UNDEF node as the second operand,
16633 // but in some cases the first operand may be transformed to UNDEF.
16634 // In this case we should just commute the node.
16636 return DAG.getCommutedVectorShuffle(*SVOp);
16638 // Check for non-undef masks pointing at an undef vector and make the masks
16639 // undef as well. This makes it easier to match the shuffle based solely on
16640 // the mask.
16642 any_of(Mask, [NumElements](int M) { return M >= NumElements; })) {
16643 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
16644 for (int &M : NewMask)
16645 if (M >= NumElements)
16647 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
16650 // Check for illegal shuffle mask element index values.
16651 int MaskUpperLimit = Mask.size() * (V2IsUndef ? 1 : 2); (void)MaskUpperLimit;
16652 assert(llvm::all_of(Mask,
16653 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
16654 "Out of bounds shuffle index");
16656 // We actually see shuffles that are entirely re-arrangements of a set of
16657 // zero inputs. This mostly happens while decomposing complex shuffles into
16658 // simple ones. Directly lower these as a buildvector of zeros.
16659 APInt Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
16660 if (Zeroable.isAllOnesValue())
16661 return getZeroVector(VT, Subtarget, DAG, DL);
16663 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
16665 // Create an alternative mask with info about zeroable elements.
16666 // Here we do not set undef elements as zeroable.
16667 SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
16669 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
16670 for (int i = 0; i != NumElements; ++i)
16671 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
16672 ZeroableMask[i] = SM_SentinelZero;
16675 // Try to collapse shuffles into using a vector type with fewer elements but
16676 // wider element types. We cap this to not form integers or floating point
16677 // elements wider than 64 bits, but it might be interesting to form i128
16678 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
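// For example, a v4i32 shuffle with mask <0,1,6,7> widens to a v2i64 shuffle
// with mask <0,3>.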
16679 SmallVector<int, 16> WidenedMask;
16680 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
16681 canWidenShuffleElements(ZeroableMask, WidenedMask)) {
16682 // Shuffle mask widening should not interfere with a broadcast opportunity
16683 // by obfuscating the operands with bitcasts.
16684 // TODO: Avoid lowering directly from this top-level function: make this
16685 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
16686 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16690 MVT NewEltVT = VT.isFloatingPoint()
16691 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
16692 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
16693 int NewNumElts = NumElements / 2;
16694 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
16695 // Make sure that the new vector type is legal. For example, v2f64 isn't
16696 // legal on SSE1.
16697 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
16699 // Modify the new Mask to take all zeros from the all-zero vector.
16700 // Choose indices that are blend-friendly.
16701 bool UsedZeroVector = false;
16702 assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
16703 "V2's non-undef elements are used?!");
16704 for (int i = 0; i != NewNumElts; ++i)
16705 if (WidenedMask[i] == SM_SentinelZero) {
16706 WidenedMask[i] = i + NewNumElts;
16707 UsedZeroVector = true;
16709 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
16710 // some elements to be undef.
16711 if (UsedZeroVector)
16712 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
16714 V1 = DAG.getBitcast(NewVT, V1);
16715 V2 = DAG.getBitcast(NewVT, V2);
16716 return DAG.getBitcast(
16717 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
16721 // Commute the shuffle if it will improve canonicalization.
16722 if (canonicalizeShuffleMaskWithCommute(Mask))
16723 return DAG.getCommutedVectorShuffle(*SVOp);
16725 if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
16728 // For each vector width, delegate to a specialized lowering routine.
16729 if (VT.is128BitVector())
16730 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16732 if (VT.is256BitVector())
16733 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16735 if (VT.is512BitVector())
16736 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16739 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
16741 llvm_unreachable("Unimplemented!");
16744 /// Try to lower a VSELECT instruction to a vector shuffle.
16745 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
16746 const X86Subtarget &Subtarget,
16747 SelectionDAG &DAG) {
16748 SDValue Cond = Op.getOperand(0);
16749 SDValue LHS = Op.getOperand(1);
16750 SDValue RHS = Op.getOperand(2);
16751 MVT VT = Op.getSimpleValueType();
16753 // Only non-legal VSELECTs reach this lowering; convert those into generic
16754 // shuffles and reuse the shuffle lowering path for blends.
16755 SmallVector<int, 32> Mask;
16756 if (createShuffleMaskFromVSELECT(Mask, Cond))
16757 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
16762 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
16763 SDValue Cond = Op.getOperand(0);
16764 SDValue LHS = Op.getOperand(1);
16765 SDValue RHS = Op.getOperand(2);
16767 // A vselect where all conditions and data are constants can be optimized into
16768 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
16769 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
16770 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
16771 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
16774 // Try to lower this to a blend-style vector shuffle. This can handle all
16775 // constant condition cases.
16776 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
16779 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
16780 // with patterns on the mask registers on AVX-512.
16781 MVT CondVT = Cond.getSimpleValueType();
16782 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
16783 if (CondEltSize == 1)
16786 // Variable blends are only legal from SSE4.1 onward.
16787 if (!Subtarget.hasSSE41())
16791 MVT VT = Op.getSimpleValueType();
16792 unsigned EltSize = VT.getScalarSizeInBits();
16793 unsigned NumElts = VT.getVectorNumElements();
16795 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
16796 // into an i1 condition so that we can use the mask-based 512-bit blend
16797 // instructions.
16798 if (VT.getSizeInBits() == 512) {
16799 // Build a mask by testing the condition against zero.
16800 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
16801 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
16802 DAG.getConstant(0, dl, CondVT),
16804 // Now return a new VSELECT using the mask.
16805 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
16808 // SEXT/TRUNC cases where the mask doesn't match the destination size.
16809 if (CondEltSize != EltSize) {
16810 // If we don't have a sign splat, rely on the expansion.
16811 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
16814 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
16815 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
16816 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
16817 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
16820 // Only some types will be legal on some subtargets. If we can emit a legal
16821 // VSELECT-matching blend, return Op, but if we need to expand, return
16822 // a null value.
16823 switch (VT.SimpleTy) {
16825 // Most of the vector types have blends past SSE4.1.
16829 // The byte blends for AVX vectors were introduced only in AVX2.
16830 if (Subtarget.hasAVX2())
16836 case MVT::v16i16: {
16837 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
16838 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
16839 Cond = DAG.getBitcast(CastVT, Cond);
16840 LHS = DAG.getBitcast(CastVT, LHS);
16841 RHS = DAG.getBitcast(CastVT, RHS);
16842 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
16843 return DAG.getBitcast(VT, Select);
16848 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
16849 MVT VT = Op.getSimpleValueType();
16852 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
16855 if (VT.getSizeInBits() == 8) {
16856 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
16857 Op.getOperand(0), Op.getOperand(1));
16858 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
16861 if (VT == MVT::f32) {
16862 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
16863 // the result back to an FR32 register. It's only worth matching if the
16864 // result has a single use which is a store or a bitcast to i32. And in
16865 // the case of a store, it's not worth it if the index is a constant 0,
16866 // because a MOVSSmr can be used instead, which is smaller and faster.
16867 if (!Op.hasOneUse())
16869 SDNode *User = *Op.getNode()->use_begin();
16870 if ((User->getOpcode() != ISD::STORE ||
16871 isNullConstant(Op.getOperand(1))) &&
16872 (User->getOpcode() != ISD::BITCAST ||
16873 User->getValueType(0) != MVT::i32))
16875 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
16876 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
16878 return DAG.getBitcast(MVT::f32, Extract);
16881 if (VT == MVT::i32 || VT == MVT::i64) {
16882 // ExtractPS/pextrq works with constant index.
16883 if (isa<ConstantSDNode>(Op.getOperand(1)))
16890 /// Extract one bit from mask vector, like v16i1 or v8i1.
16891 /// AVX-512 feature.
16892 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
16893 const X86Subtarget &Subtarget) {
16894 SDValue Vec = Op.getOperand(0);
16896 MVT VecVT = Vec.getSimpleValueType();
16897 SDValue Idx = Op.getOperand(1);
16898 MVT EltVT = Op.getSimpleValueType();
16900 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
16901 "Unexpected vector type in ExtractBitFromMaskVector");
16903 // A variable index can't be handled in mask registers;
16904 // extend the vector to VR512/128.
16905 if (!isa<ConstantSDNode>(Idx)) {
16906 unsigned NumElts = VecVT.getVectorNumElements();
16907 // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
16908 // than extending to 128/256 bits.
16909 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
16910 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
16911 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
16912 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
16913 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
16916 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
16917 if (IdxVal == 0) // the operation is legal
16920 // Extend to natively supported kshift.
16921 unsigned NumElems = VecVT.getVectorNumElements();
16922 MVT WideVecVT = VecVT;
16923 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
16924 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
16925 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
16926 DAG.getUNDEF(WideVecVT), Vec,
16927 DAG.getIntPtrConstant(0, dl));
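// For example, extracting bit 5 from a v8i1 mask on a target without DQI:
// the mask is widened to v16i1 above, KSHIFTR by 5 moves the requested bit
// into lane 0, and the EXTRACT_VECTOR_ELT below reads that lane.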
16930 // Use kshiftr instruction to move to the lower element.
16931 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
16932 DAG.getConstant(IdxVal, dl, MVT::i8));
16934 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
16935 DAG.getIntPtrConstant(0, dl));
16939 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
16940 SelectionDAG &DAG) const {
16942 SDValue Vec = Op.getOperand(0);
16943 MVT VecVT = Vec.getSimpleValueType();
16944 SDValue Idx = Op.getOperand(1);
16946 if (VecVT.getVectorElementType() == MVT::i1)
16947 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
16949 if (!isa<ConstantSDNode>(Idx)) {
16950 // It's more profitable to go through memory (1 cycle throughput) than to use
16951 // a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
16952 // The IACA tool was used to get these performance estimates
16953 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer).
16955 // Example: extractelement <16 x i8> %a, i32 %i
16957 // Block Throughput: 3.00 Cycles
16958 // Throughput Bottleneck: Port5
16960 // | Num Of | Ports pressure in cycles | |
16961 // | Uops | 0 - DV | 5 | 6 | 7 | |
16962 // ---------------------------------------------
16963 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
16964 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
16965 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
16966 // Total Num Of Uops: 4
16969 // Block Throughput: 1.00 Cycles
16970 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
16972 // | | Ports pressure in cycles | |
16973 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
16974 // ---------------------------------------------------------
16975 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
16976 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
16977 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
16978 // Total Num Of Uops: 4
16983 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
16985 // If this is a 256-bit vector result, first extract the 128-bit vector and
16986 // then extract the element from the 128-bit vector.
16987 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
16988 // Get the 128-bit vector.
16989 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
16990 MVT EltVT = VecVT.getVectorElementType();
16992 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
16993 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
16995 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
16996 // this can be done with a mask.
16997 IdxVal &= ElemsPerChunk - 1;
16998 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
16999 DAG.getIntPtrConstant(IdxVal, dl));
17002 assert(VecVT.is128BitVector() && "Unexpected vector length");
17004 MVT VT = Op.getSimpleValueType();
17006 if (VT.getSizeInBits() == 16) {
17007 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17008 // we're going to zero extend the register or fold the store (SSE41 only).
17009 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
17010 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17011 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17012 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17013 DAG.getBitcast(MVT::v4i32, Vec), Idx));
17015 // Transform it so it matches pextrw, which produces a 32-bit result.
17016 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
17017 Op.getOperand(0), Op.getOperand(1));
17018 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17021 if (Subtarget.hasSSE41())
17022 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17025 // TODO: We only extract a single element from v16i8; we can probably afford
17026 // to be more aggressive here before using the default approach of spilling to the stack.
17028 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
17029 // Extract either the lowest i32 or any i16, and extract the sub-byte.
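// For example, IdxVal == 5: the byte is not in dword 0, so we extract word 2
// of the v8i16 bitcast (bytes 4-5), shift right by (5 % 2) * 8 == 8 bits, and
// truncate to i8.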
17030 int DWordIdx = IdxVal / 4;
17031 if (DWordIdx == 0) {
17032 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17033 DAG.getBitcast(MVT::v4i32, Vec),
17034 DAG.getIntPtrConstant(DWordIdx, dl));
17035 int ShiftVal = (IdxVal % 4) * 8;
17037 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17038 DAG.getConstant(ShiftVal, dl, MVT::i8));
17039 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17042 int WordIdx = IdxVal / 2;
17043 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17044 DAG.getBitcast(MVT::v8i16, Vec),
17045 DAG.getIntPtrConstant(WordIdx, dl));
17046 int ShiftVal = (IdxVal % 2) * 8;
17048 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17049 DAG.getConstant(ShiftVal, dl, MVT::i8));
17050 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17053 if (VT.getSizeInBits() == 32) {
17057 // SHUFPS the element to the lowest double word, then movss.
17058 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17059 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17060 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17061 DAG.getIntPtrConstant(0, dl));
17064 if (VT.getSizeInBits() == 64) {
17065 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17066 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17067 // to match extract_elt for f64.
17071 // UNPCKHPD the element to the lowest double word, then movsd.
17072 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
17073 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
17074 int Mask[2] = { 1, -1 };
17075 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17076 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17077 DAG.getIntPtrConstant(0, dl));
17083 /// Insert one bit to mask vector, like v16i1 or v8i1.
17084 /// AVX-512 feature.
17085 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17086 const X86Subtarget &Subtarget) {
17088 SDValue Vec = Op.getOperand(0);
17089 SDValue Elt = Op.getOperand(1);
17090 SDValue Idx = Op.getOperand(2);
17091 MVT VecVT = Vec.getSimpleValueType();
17093 if (!isa<ConstantSDNode>(Idx)) {
17094 // Non-constant index. Extend the source and destination,
17095 // insert the element, and then truncate the result.
17096 unsigned NumElts = VecVT.getVectorNumElements();
17097 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17098 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17099 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17100 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17101 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17102 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17105 // Copy into a k-register, extract to v1i1 and insert_subvector.
17106 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17108 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
17112 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17113 SelectionDAG &DAG) const {
17114 MVT VT = Op.getSimpleValueType();
17115 MVT EltVT = VT.getVectorElementType();
17116 unsigned NumElts = VT.getVectorNumElements();
17118 if (EltVT == MVT::i1)
17119 return InsertBitToMaskVector(Op, DAG, Subtarget);
17122 SDValue N0 = Op.getOperand(0);
17123 SDValue N1 = Op.getOperand(1);
17124 SDValue N2 = Op.getOperand(2);
17126 auto *N2C = dyn_cast<ConstantSDNode>(N2);
17127 if (!N2C || N2C->getAPIntValue().uge(NumElts))
17129 uint64_t IdxVal = N2C->getZExtValue();
17131 bool IsZeroElt = X86::isZeroNode(N1);
17132 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17134 // If we are inserting an element, see if we can do this more efficiently with
17135 // a blend shuffle with a rematerializable vector rather than a costly integer insertion.
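// For example, inserting zero into lane 2 of a v4i32 builds the blend mask
// <0, 1, 6, 3> against a zero vector, which the shuffle lowering can turn
// into a single blend instruction.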
17137 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17138 16 <= EltVT.getSizeInBits()) {
17139 SmallVector<int, 8> BlendMask;
17140 for (unsigned i = 0; i != NumElts; ++i)
17141 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17142 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17143 : getOnesVector(VT, DAG, dl);
17144 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
17147 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17148 // into that, and then insert the subvector back into the result.
17149 if (VT.is256BitVector() || VT.is512BitVector()) {
17150 // With a 256-bit vector, we can insert into the zero element efficiently
17151 // using a blend if we have AVX or AVX2 and the right data type.
17152 if (VT.is256BitVector() && IdxVal == 0) {
17153 // TODO: It is worthwhile to cast integer to floating point and back
17154 // and incur a domain crossing penalty if that's what we'll end up
17155 // doing anyway after extracting to a 128-bit vector.
17156 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17157 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17158 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17159 N2 = DAG.getIntPtrConstant(1, dl);
17160 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
17164 // Get the desired 128-bit vector chunk.
17165 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17167 // Insert the element into the desired chunk.
17168 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17169 assert(isPowerOf2_32(NumEltsIn128));
17170 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
17171 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17173 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17174 DAG.getIntPtrConstant(IdxIn128, dl));
17176 // Insert the changed part back into the bigger vector
17177 return insert128BitVector(N0, V, IdxVal, DAG, dl);
17179 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17181 // This will be just movd/movq/movss/movsd.
17182 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17183 (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17184 EltVT == MVT::i64)) {
17185 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17186 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17189 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17190 // argument. SSE41 is required for pinsrb.
17191 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17193 if (VT == MVT::v8i16) {
17194 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17195 Opc = X86ISD::PINSRW;
17197 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17198 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17199 Opc = X86ISD::PINSRB;
17202 if (N1.getValueType() != MVT::i32)
17203 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17204 if (N2.getValueType() != MVT::i32)
17205 N2 = DAG.getIntPtrConstant(IdxVal, dl);
17206 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17209 if (Subtarget.hasSSE41()) {
17210 if (EltVT == MVT::f32) {
17211 // Bits [7:6] of the constant are the source select. This will always be
17212 // zero here. The DAG Combiner may combine an extract_elt index into
17213 // these bits. For example (insert (extract, 3), 2) could be matched by
17214 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17215 // Bits [5:4] of the constant are the destination select. This is the
17216 // value of the incoming immediate.
17217 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17218 // combine either bitwise AND or insert of float 0.0 to set these bits.
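// For example, (insert (extract X, 3), 2) could use an INSERTPS immediate of
// 0xE0: source lane 3 in bits [7:6] and destination lane 2 in bits [5:4].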
17220 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17221 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17222 // If this is an insertion of 32-bits into the low 32-bits of
17223 // a vector, we prefer to generate a blend with immediate rather
17224 // than an insertps. Blends are simpler operations in hardware and so
17225 // will always have equal or better performance than insertps.
17226 // But if optimizing for size and there's a load folding opportunity,
17227 // generate insertps, because blendps does not have a 32-bit memory operand.
17229 N2 = DAG.getIntPtrConstant(1, dl);
17230 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17231 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
17233 N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
17234 // Create this as a scalar-to-vector operation.
17235 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17236 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
17239 // PINSR* works with constant index.
17240 if (EltVT == MVT::i32 || EltVT == MVT::i64)
17247 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17248 SelectionDAG &DAG) {
17250 MVT OpVT = Op.getSimpleValueType();
17252 // It's always cheaper to replace an xor+movd with xorps, and it simplifies further combines.
17254 if (X86::isZeroNode(Op.getOperand(0)))
17255 return getZeroVector(OpVT, Subtarget, DAG, dl);
17257 // If this is a 256-bit vector result, first insert into a 128-bit
17258 // vector and then insert into the 256-bit vector.
17259 if (!OpVT.is128BitVector()) {
17260 // Insert into a 128-bit vector.
17261 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17262 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17263 OpVT.getVectorNumElements() / SizeFactor);
17265 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17267 // Insert the 128-bit vector.
17268 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17270 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17271 "Expected an SSE type!");
17273 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17274 if (OpVT == MVT::v4i32)
17277 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17278 return DAG.getBitcast(
17279 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17282 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
17283 // simple superregister reference or explicit instructions to insert
17284 // the upper bits of a vector.
17285 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17286 SelectionDAG &DAG) {
17287 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17289 return insert1BitVector(Op, DAG, Subtarget);
17292 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17293 SelectionDAG &DAG) {
17294 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17295 "Only vXi1 extract_subvectors need custom lowering");
17298 SDValue Vec = Op.getOperand(0);
17299 SDValue Idx = Op.getOperand(1);
17301 if (!isa<ConstantSDNode>(Idx))
17304 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17305 if (IdxVal == 0) // the operation is legal
17308 MVT VecVT = Vec.getSimpleValueType();
17309 unsigned NumElems = VecVT.getVectorNumElements();
17311 // Extend to natively supported kshift.
17312 MVT WideVecVT = VecVT;
17313 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17314 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17315 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17316 DAG.getUNDEF(WideVecVT), Vec,
17317 DAG.getIntPtrConstant(0, dl));
17320 // Shift to the LSB.
17321 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17322 DAG.getConstant(IdxVal, dl, MVT::i8));
17324 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
17325 DAG.getIntPtrConstant(0, dl));
17328 // Returns the appropriate wrapper opcode for a global reference.
17329 unsigned X86TargetLowering::getGlobalWrapperKind(
17330 const GlobalValue *GV, const unsigned char OpFlags) const {
17331 // References to absolute symbols are never PC-relative.
17332 if (GV && GV->isAbsoluteSymbolRef())
17333 return X86ISD::Wrapper;
17335 CodeModel::Model M = getTargetMachine().getCodeModel();
17336 if (Subtarget.isPICStyleRIPRel() &&
17337 (M == CodeModel::Small || M == CodeModel::Kernel))
17338 return X86ISD::WrapperRIP;
17340 // GOTPCREL references must always use RIP.
17341 if (OpFlags == X86II::MO_GOTPCREL)
17342 return X86ISD::WrapperRIP;
17344 return X86ISD::Wrapper;
17347 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
17348 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
17349 // one of the above-mentioned nodes. It has to be wrapped because otherwise
17350 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
17351 // be used to form an addressing mode. These wrapped nodes will be selected into MOV32ri.
17354 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
17355 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
17357 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17358 // global base reg.
17359 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17361 auto PtrVT = getPointerTy(DAG.getDataLayout());
17362 SDValue Result = DAG.getTargetConstantPool(
17363 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
17365 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17366 // With PIC, the address is actually $g + Offset.
17369 DAG.getNode(ISD::ADD, DL, PtrVT,
17370 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17376 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
17377 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
17379 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17380 // global base reg.
17381 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17383 auto PtrVT = getPointerTy(DAG.getDataLayout());
17384 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
17386 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17388 // With PIC, the address is actually $g + Offset.
17391 DAG.getNode(ISD::ADD, DL, PtrVT,
17392 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17397 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
17398 SelectionDAG &DAG) const {
17399 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17403 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
17404 // Create the TargetBlockAddress node.
17405 unsigned char OpFlags =
17406 Subtarget.classifyBlockAddressReference();
17407 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
17408 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
17410 auto PtrVT = getPointerTy(DAG.getDataLayout());
17411 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
17412 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
17414 // With PIC, the address is actually $g + Offset.
17415 if (isGlobalRelativeToPICBase(OpFlags)) {
17416 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17417 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17423 /// Creates target global address or external symbol nodes for calls or other uses.
17425 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
17426 bool ForCall) const {
17427 // Unpack the global address or external symbol.
17428 const SDLoc &dl = SDLoc(Op);
17429 const GlobalValue *GV = nullptr;
17430 int64_t Offset = 0;
17431 const char *ExternalSym = nullptr;
17432 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
17433 GV = G->getGlobal();
17434 Offset = G->getOffset();
17436 const auto *ES = cast<ExternalSymbolSDNode>(Op);
17437 ExternalSym = ES->getSymbol();
17440 // Calculate some flags for address lowering.
17441 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
17442 unsigned char OpFlags;
17444 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
17446 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
17447 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
17448 bool NeedsLoad = isGlobalStubReference(OpFlags);
17450 CodeModel::Model M = DAG.getTarget().getCodeModel();
17451 auto PtrVT = getPointerTy(DAG.getDataLayout());
17455 // Create a target global address if this is a global. If possible, fold the
17456 // offset into the global address reference. Otherwise, ADD it on later.
17457 int64_t GlobalOffset = 0;
17458 if (OpFlags == X86II::MO_NO_FLAG &&
17459 X86::isOffsetSuitableForCodeModel(Offset, M)) {
17460 std::swap(GlobalOffset, Offset);
17462 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
17464 // If this is not a global address, this must be an external symbol.
17465 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
17468 // If this is a direct call, avoid the wrapper if we don't need to do any
17469 // loads or adds. This allows SDAG ISel to match direct calls.
17470 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
17473 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
17475 // With PIC, the address is actually $g + Offset.
17477 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17478 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17481 // For globals that require a load from a stub to get the address, emit the load.
17484 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
17485 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
17487 // If there was a non-zero offset that we didn't fold, create an explicit
17488 // addition for it.
17490 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
17491 DAG.getConstant(Offset, dl, PtrVT));
17497 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
17498 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17502 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
17503 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
17504 unsigned char OperandFlags, bool LocalDynamic = false) {
17505 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17506 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17508 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17509 GA->getValueType(0),
17513 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
17517 SDValue Ops[] = { Chain, TGA, *InFlag };
17518 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17520 SDValue Ops[] = { Chain, TGA };
17521 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17524 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
17525 MFI.setAdjustsStack(true);
17526 MFI.setHasCalls(true);
17528 SDValue Flag = Chain.getValue(1);
17529 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
17532 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
17534 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17537 SDLoc dl(GA); // ? function entry point might be better
17538 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17539 DAG.getNode(X86ISD::GlobalBaseReg,
17540 SDLoc(), PtrVT), InFlag);
17541 InFlag = Chain.getValue(1);
17543 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
17546 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
17548 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17550 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
17551 X86::RAX, X86II::MO_TLSGD);
17554 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
17560 // Get the start address of the TLS block for this module.
17561 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
17562 .getInfo<X86MachineFunctionInfo>();
17563 MFI->incNumLocalDynamicTLSAccesses();
17567 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
17568 X86II::MO_TLSLD, /*LocalDynamic=*/true);
17571 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17572 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
17573 InFlag = Chain.getValue(1);
17574 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
17575 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
17578 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations of the TLS base address.
17582 unsigned char OperandFlags = X86II::MO_DTPOFF;
17583 unsigned WrapperKind = X86ISD::Wrapper;
17584 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17585 GA->getValueType(0),
17586 GA->getOffset(), OperandFlags);
17587 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
17589 // Add x@dtpoff with the base.
17590 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
17593 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
17594 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17595 const EVT PtrVT, TLSModel::Model model,
17596 bool is64Bit, bool isPIC) {
17599 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
17600 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
17601 is64Bit ? 257 : 256));
17603 SDValue ThreadPointer =
17604 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
17605 MachinePointerInfo(Ptr));
17607 unsigned char OperandFlags = 0;
17608 // Most TLS accesses are not RIP relative, even on x86-64. One exception is the initial-exec model.
17610 unsigned WrapperKind = X86ISD::Wrapper;
17611 if (model == TLSModel::LocalExec) {
17612 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
17613 } else if (model == TLSModel::InitialExec) {
17615 OperandFlags = X86II::MO_GOTTPOFF;
17616 WrapperKind = X86ISD::WrapperRIP;
17618 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
17621 llvm_unreachable("Unexpected model");
17624 // emit "addl x@ntpoff,%eax" (local exec)
17625 // or "addl x@indntpoff,%eax" (initial exec)
17626 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
17628 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
17629 GA->getOffset(), OperandFlags);
17630 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
17632 if (model == TLSModel::InitialExec) {
17633 if (isPIC && !is64Bit) {
17634 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
17635 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
17639 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
17640 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
17643 // The address of the thread local variable is the sum of the thread
17644 // pointer and the offset of the variable.
17645 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
17649 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
17651 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
17653 if (DAG.getTarget().useEmulatedTLS())
17654 return LowerToTLSEmulatedModel(GA, DAG);
17656 const GlobalValue *GV = GA->getGlobal();
17657 auto PtrVT = getPointerTy(DAG.getDataLayout());
17658 bool PositionIndependent = isPositionIndependent();
17660 if (Subtarget.isTargetELF()) {
17661 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
17663 case TLSModel::GeneralDynamic:
17664 if (Subtarget.is64Bit())
17665 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
17666 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
17667 case TLSModel::LocalDynamic:
17668 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
17669 Subtarget.is64Bit());
17670 case TLSModel::InitialExec:
17671 case TLSModel::LocalExec:
17672 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
17673 PositionIndependent);
17675 llvm_unreachable("Unknown TLS model.");
17678 if (Subtarget.isTargetDarwin()) {
17679 // Darwin only has one model of TLS. Lower to that.
17680 unsigned char OpFlag = 0;
17681 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
17682 X86ISD::WrapperRIP : X86ISD::Wrapper;
17684 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17685 // global base reg.
17686 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
17688 OpFlag = X86II::MO_TLVP_PIC_BASE;
17690 OpFlag = X86II::MO_TLVP;
17692 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
17693 GA->getValueType(0),
17694 GA->getOffset(), OpFlag);
17695 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
17697 // With PIC32, the address is actually $g + Offset.
17699 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
17700 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
17703 // Lowering the machine ISD node will make sure everything is in the right place.
17705 SDValue Chain = DAG.getEntryNode();
17706 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17707 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
17708 SDValue Args[] = { Chain, Offset };
17709 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
17710 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
17711 DAG.getIntPtrConstant(0, DL, true),
17712 Chain.getValue(1), DL);
17714 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
17715 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17716 MFI.setAdjustsStack(true);
17718 // And our return value (the TLS address) is in the standard call return value register.
17720 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
17721 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
17724 if (Subtarget.isOSWindows()) {
17725 // Just use the implicit TLS architecture
17726 // Need to generate something similar to:
17727 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
17729 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
17730 // mov rcx, qword [rdx+rcx*8]
17731 // mov eax, .tls$:tlsvar
17732 // [rax+rcx] contains the address
17733 // Windows 64bit: gs:0x58
17734 // Windows 32bit: fs:__tls_array
17737 SDValue Chain = DAG.getEntryNode();
17739 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
17740 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
17741 // use its literal value of 0x2C.
17742 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
17743 ? Type::getInt8PtrTy(*DAG.getContext(),
17745 : Type::getInt32PtrTy(*DAG.getContext(),
17748 SDValue TlsArray = Subtarget.is64Bit()
17749 ? DAG.getIntPtrConstant(0x58, dl)
17750 : (Subtarget.isTargetWindowsGNU()
17751 ? DAG.getIntPtrConstant(0x2C, dl)
17752 : DAG.getExternalSymbol("_tls_array", PtrVT));
17754 SDValue ThreadPointer =
17755 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
17758 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
17759 res = ThreadPointer;
17761 // Load the _tls_index variable
17762 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
17763 if (Subtarget.is64Bit())
17764 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
17765 MachinePointerInfo(), MVT::i32);
17767 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
17769 auto &DL = DAG.getDataLayout();
17771 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
17772 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
17774 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
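// res now points at this module's slot in the ThreadLocalStoragePointer
// array; the load below yields the base of the module's TLS block, to which
// the variable's SECREL offset is added.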
17777 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
17779 // Get the offset of the start of the .tls section.
17780 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17781 GA->getValueType(0),
17782 GA->getOffset(), X86II::MO_SECREL);
17783 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
17785 // The address of the thread local variable is the sum of the thread
17786 // pointer and the offset of the variable.
17787 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
17790 llvm_unreachable("TLS not implemented for this target.");
17793 /// Lower SRA_PARTS and friends, which return two i32 values
17794 /// and take a 2 x i32 value to shift plus a shift amount.
17795 /// TODO: Can this be moved to general expansion code?
17796 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
17797 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
17798 MVT VT = Op.getSimpleValueType();
17799 unsigned VTBits = VT.getSizeInBits();
17801 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
17802 SDValue ShOpLo = Op.getOperand(0);
17803 SDValue ShOpHi = Op.getOperand(1);
17804 SDValue ShAmt = Op.getOperand(2);
17805 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
17806 // ISD::SRA/L nodes haven't. Insert an AND to be safe; it's optimized away later.
17808 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
17809 DAG.getConstant(VTBits - 1, dl, MVT::i8));
17810 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
17811 DAG.getConstant(VTBits - 1, dl, MVT::i8))
17812 : DAG.getConstant(0, dl, VT);
17814 SDValue Tmp2, Tmp3;
17815 if (Op.getOpcode() == ISD::SHL_PARTS) {
17816 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
17817 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
17819 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
17820 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
17823 // If the shift amount is greater than or equal to the width of a part, we
17824 // can't rely on the results of shld/shrd. Insert a test and select the
17825 // appropriate values for large shift amounts.
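// For example, with i32 parts and a shift amount of 40, bit 5 of the amount
// is set, so for SHL_PARTS we select Hi = Lo << (40 & 31) and Lo = 0.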
17826 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
17827 DAG.getConstant(VTBits, dl, MVT::i8));
17828 SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
17829 DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
17832 if (Op.getOpcode() == ISD::SHL_PARTS) {
17833 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
17834 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
17836 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
17837 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
17840 return DAG.getMergeValues({ Lo, Hi }, dl);
17843 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
17844 SelectionDAG &DAG) {
17845 MVT VT = Op.getSimpleValueType();
17846 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
17847 "Unexpected funnel shift opcode!");
17850 SDValue Op0 = Op.getOperand(0);
17851 SDValue Op1 = Op.getOperand(1);
17852 SDValue Amt = Op.getOperand(2);
17854 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
17856 if (VT.isVector()) {
17857 assert(Subtarget.hasVBMI2() && "Expected VBMI2");
17860 std::swap(Op0, Op1);
17862 APInt APIntShiftAmt;
17863 if (isConstantSplat(Amt, APIntShiftAmt)) {
17864 uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
17865 return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
17866 Op0, Op1, DAG.getConstant(ShiftAmt, DL, MVT::i8));
17869 return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
17873 assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
17874 "Unexpected funnel shift type!");
17876 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
17877 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
17878 if (!OptForSize && Subtarget.isSHLDSlow())
17882 std::swap(Op0, Op1);
17884 // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
17885 if (VT == MVT::i16)
17886 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
17887 DAG.getConstant(15, DL, Amt.getValueType()));
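// (The hardware masks the SHLD/SHRD count modulo 32 or 64, which matches
// i32/i64 funnel-shift semantics but not i16, hence the explicit mod-16
// reduction above.)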
17889 unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
17890 return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
17893 // Try to use a packed vector operation to handle i64 on 32-bit targets when
17894 // AVX512DQ is enabled.
17895 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
17896 const X86Subtarget &Subtarget) {
17897 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
17898 Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
17899 SDValue Src = Op.getOperand(0);
17900 MVT SrcVT = Src.getSimpleValueType();
17901 MVT VT = Op.getSimpleValueType();
17903 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
17904 (VT != MVT::f32 && VT != MVT::f64))
17907 // Pack the i64 into a vector, do the operation and extract.
17909 // Using a 256-bit source vector ensures the result is 128 bits wide for the f32 case.
17910 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
17911 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
17912 MVT VecVT = MVT::getVectorVT(VT, NumElts);
17915 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
17916 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
17917 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
17918 DAG.getIntPtrConstant(0, dl));
17921 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
17922 const X86Subtarget &Subtarget) {
17924 case ISD::SINT_TO_FP:
17925 // TODO: Handle wider types with AVX/AVX512.
17926 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
17928 // CVTDQ2PS or (V)CVTDQ2PD
17929 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
17931 case ISD::UINT_TO_FP:
17932 // TODO: Handle wider types and i64 elements.
17933 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
17935 // VCVTUDQ2PS or VCVTUDQ2PD
17936 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
17943 /// Given a scalar cast operation that is extracted from a vector, try to
17944 /// vectorize the cast op followed by extraction. This will avoid an expensive
17945 /// round-trip between XMM and GPR.
17946 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
17947 const X86Subtarget &Subtarget) {
17948 // TODO: This could be enhanced to handle smaller integer types by peeking
17949 // through an extend.
17950 SDValue Extract = Cast.getOperand(0);
17951 MVT DestVT = Cast.getSimpleValueType();
17952 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
17953 !isa<ConstantSDNode>(Extract.getOperand(1)))
17956 // See if we have a 128-bit vector cast op for this type of cast.
17957 SDValue VecOp = Extract.getOperand(0);
17958 MVT FromVT = VecOp.getSimpleValueType();
17959 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
17960 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
17961 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
17962 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
17965 // If we are extracting from a non-zero element, first shuffle the source
17966 // vector to allow extracting from element zero.
17968 if (!isNullConstant(Extract.getOperand(1))) {
17969 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
17970 Mask[0] = Extract.getConstantOperandVal(1);
17971 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
17973 // If the source vector is wider than 128-bits, extract the low part. Do not
17974 // create an unnecessarily wide vector cast op.
17975 if (FromVT != Vec128VT)
17976 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
17978 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
17979 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
17980 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
17981 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
17982 DAG.getIntPtrConstant(0, DL));
17985 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
17986 SelectionDAG &DAG) const {
17987 SDValue Src = Op.getOperand(0);
17988 MVT SrcVT = Src.getSimpleValueType();
17989 MVT VT = Op.getSimpleValueType();
17992 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
17995 if (SrcVT.isVector()) {
17996 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
17997 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
17998 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
17999 DAG.getUNDEF(SrcVT)));
18004 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
18005 "Unknown SINT_TO_FP to lower!");
18007 // These are really Legal; return the operand so the caller accepts it as Legal.
18009 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
18011 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
18014 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18017 SDValue ValueToStore = Op.getOperand(0);
18018 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
18019 !Subtarget.is64Bit())
18020 // Bitcasting to f64 here allows us to do a single 64-bit store from
18021 // an SSE register, avoiding the store forwarding penalty that would come
18022 // with two 32-bit stores.
18023 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18025 unsigned Size = SrcVT.getSizeInBits()/8;
18026 MachineFunction &MF = DAG.getMachineFunction();
18027 auto PtrVT = getPointerTy(MF.getDataLayout());
18028 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
18029 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18030 SDValue Chain = DAG.getStore(
18031 DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18032 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18033 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18036 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18038 SelectionDAG &DAG) const {
18042 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18044 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18046 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18048 unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18050 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18051 MachineMemOperand *LoadMMO;
18053 int SSFI = FI->getIndex();
18054 LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18055 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18056 MachineMemOperand::MOLoad, ByteSize, ByteSize);
18058 LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18059 StackSlot = StackSlot.getOperand(1);
18061 SDValue FILDOps[] = {Chain, StackSlot};
18063 DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18064 Tys, FILDOps, SrcVT, LoadMMO);
18067 Chain = Result.getValue(1);
18068 SDValue InFlag = Result.getValue(2);
18070 // FIXME: Currently the FST is glued to the FILD_FLAG. This
18071 // shouldn't be necessary except that RFP cannot be live across
18072 // multiple blocks. When the stackifier is fixed, they can be uncoupled.
18073 MachineFunction &MF = DAG.getMachineFunction();
18074 unsigned SSFISize = Op.getValueSizeInBits() / 8;
18075 int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18076 auto PtrVT = getPointerTy(MF.getDataLayout());
18077 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18078 Tys = DAG.getVTList(MVT::Other);
18079 SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18080 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18081 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18082 MachineMemOperand::MOStore, SSFISize, SSFISize);
18084 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18085 Op.getValueType(), StoreMMO);
18086 Result = DAG.getLoad(
18087 Op.getValueType(), DL, Chain, StackSlot,
18088 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18094 /// 64-bit unsigned integer to double expansion.
18095 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18096 const X86Subtarget &Subtarget) {
18097 // This algorithm is not obvious. Here is what we're trying to output:
18100 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18101 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18103 haddpd %xmm0, %xmm0
18105 pshufd $0x4e, %xmm0, %xmm1
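// The punpckldq pairs each 32-bit half of the input with a magic high word:
// read as doubles, (lo | 0x43300000 << 32) == 2^52 + lo and
// (hi | 0x45300000 << 32) == 2^84 + hi * 2^32. Subtracting 2^52 and 2^84
// leaves { lo, hi * 2^32 }, and the horizontal add reassembles the value.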
18111 LLVMContext *Context = DAG.getContext();
18113 // Build some magic constants.
18114 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18115 Constant *C0 = ConstantDataVector::get(*Context, CV0);
18116 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18117 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18119 SmallVector<Constant*,2> CV1;
18121 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18122 APInt(64, 0x4330000000000000ULL))));
18124 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18125 APInt(64, 0x4530000000000000ULL))));
18126 Constant *C1 = ConstantVector::get(CV1);
18127 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
18129 // Load the 64-bit value into an XMM register.
18130 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
18133 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18134 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18135 /* Alignment = */ 16);
18137 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18140 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18141 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18142 /* Alignment = */ 16);
18143 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18144 // TODO: Are there any fast-math-flags to propagate here?
18145 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18148 if (Subtarget.hasSSE3()) {
18149 // FIXME: The 'haddpd' instruction may be slower than 'shuffle + addsd'.
18150 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18152 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18153 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
18156 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
18157 DAG.getIntPtrConstant(0, dl));
18160 /// 32-bit unsigned integer to float expansion.
18161 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
18162 const X86Subtarget &Subtarget) {
18164 // FP constant to bias-correct the final result.
18165 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
18168 // Load the 32-bit value into an XMM register.
18169 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
18172 // Zero out the upper parts of the register.
18173 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
18175 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18176 DAG.getBitcast(MVT::v2f64, Load),
18177 DAG.getIntPtrConstant(0, dl));
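// The bias 0x4330000000000000 is the bit pattern of 2^52 as a double; OR'ing
// the zero-extended 32-bit input into its low mantissa bits yields exactly
// 2^52 + x, so subtracting the bias below recovers x without rounding error.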
18179 // Or the load with the bias.
18180 SDValue Or = DAG.getNode(
18181 ISD::OR, dl, MVT::v2i64,
18182 DAG.getBitcast(MVT::v2i64,
18183 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
18184 DAG.getBitcast(MVT::v2i64,
18185 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
18187 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18188 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
18190 // Subtract the bias.
18191 // TODO: Are there any fast-math-flags to propagate here?
18192 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
18194 // Handle final rounding.
18195 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
18198 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
18199 const X86Subtarget &Subtarget,
18201 if (Op.getSimpleValueType() != MVT::v2f64)
18204 SDValue N0 = Op.getOperand(0);
18205 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
18207 // Legalize to v4i32 type.
18208 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
18209 DAG.getUNDEF(MVT::v2i32));
18211 if (Subtarget.hasAVX512())
18212 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
18214 // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
18215 // but using v2i32 to v2f64 with X86ISD::CVTSI2P.
18216 SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
18217 SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
18219 // Two to the power of half-word-size.
18220 SDValue TWOHW = DAG.getConstantFP((double)(1 << 16), DL, MVT::v2f64);
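// Each 32-bit lane is split as v == hi * 2^16 + lo with both halves in
// [0, 65536), so the signed CVTSI2P conversions below are exact and the
// scaled sum reconstructs the unsigned value in double precision.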
18222 // Clear upper part of LO, lower HI.
18223 SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
18224 SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
18226 SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
18227 fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
18228 SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
18230 // Add the two halves.
18231 return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
18234 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
18235 const X86Subtarget &Subtarget) {
18236 // The algorithm is the following:
18237 // #ifdef __SSE4_1__
18238 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18239 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18240 // (uint4) 0x53000000, 0xaa);
18242 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18243 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18245 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18246 // return (float4) lo + fhi;
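// Why this works: lo reinterpreted as float is 2^23 + (v & 0xffff) and hi is
// 2^39 + (v >> 16) * 2^16, so subtracting (0x1.0p39f + 0x1.0p23f) from hi and
// adding lo leaves exactly the value of v, correctly rounded to float.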
18248 // We shouldn't use it when unsafe-fp-math is enabled though: we might later
18249 // reassociate the two FADDs, and if we do that, the algorithm fails
18250 // spectacularly (PR24512).
18251 // FIXME: If we ever have some kind of Machine FMF, this should be marked
18252 // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
18253 // there's also the MachineCombiner reassociations happening on Machine IR.
18254 if (DAG.getTarget().Options.UnsafeFPMath)
18258 SDValue V = Op->getOperand(0);
18259 MVT VecIntVT = V.getSimpleValueType();
18260 bool Is128 = VecIntVT == MVT::v4i32;
18261 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
18262 // If we convert to something other than the supported type, e.g., to v4f64, bail out early.
18264 if (VecFloatVT != Op->getSimpleValueType(0))
18267 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
18268 "Unsupported custom type");
18270 // In the #ifdef/#else code, we have in common:
18271 // - The vector of constants:
18277 // Create the splat vector for 0x4b000000.
18278 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
18279 // Create the splat vector for 0x53000000.
18280 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
18282 // Create the right shift.
18283 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
18284 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
18287 if (Subtarget.hasSSE41()) {
18288 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
18289 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18290 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
18291 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
18292 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
18294 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
18295 VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
18296 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18297 // (uint4) 0x53000000, 0xaa);
18298 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
18299 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
18300 // High will be bitcasted right away, so do not bother bitcasting back to
18301 // its original type.
18302 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
18303 VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
18305 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
18306 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18307 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
18308 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
18310 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18311 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
18314 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
18315 SDValue VecCstFAdd = DAG.getConstantFP(
18316 APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
18318 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18319 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
18320 // TODO: Are there any fast-math-flags to propagate here?
18322 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
18323 // return (float4) lo + fhi;
18324 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
18325 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
18328 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
18329 const X86Subtarget &Subtarget) {
18330 SDValue N0 = Op.getOperand(0);
18331 MVT SrcVT = N0.getSimpleValueType();
18334 switch (SrcVT.SimpleTy) {
18336 llvm_unreachable("Custom UINT_TO_FP is not supported!");
18338 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
18341 assert(!Subtarget.hasAVX512());
18342 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
18346 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
18347 SelectionDAG &DAG) const {
18348 SDValue N0 = Op.getOperand(0);
18350 auto PtrVT = getPointerTy(DAG.getDataLayout());
18352 if (Op.getSimpleValueType().isVector())
18353 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
18355 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18358 MVT SrcVT = N0.getSimpleValueType();
18359 MVT DstVT = Op.getSimpleValueType();
18361 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
18362 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
18363 // Conversions from unsigned i32 to f32/f64 are legal,
18364 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
18368 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18371 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
18372 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
18373 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
18374 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
18375 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
18378 // Make a 64-bit buffer, and use it to build an FILD.
18379 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
18380 if (SrcVT == MVT::i32) {
18381 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
18382 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
18383 StackSlot, MachinePointerInfo());
18384 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
18385 OffsetSlot, MachinePointerInfo());
18386 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
18390 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
18391 SDValue ValueToStore = Op.getOperand(0);
18392 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
18393 // Bitcasting to f64 here allows us to do a single 64-bit store from
18394 // an SSE register, avoiding the store forwarding penalty that would come
18395 // with two 32-bit stores.
18396 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18397 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18398 MachinePointerInfo());
18399 // For i64 source, we need to add the appropriate power of 2 if the input
18400 // was negative. This is the same as the optimization in
18401 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
18402 // we must be careful to do the computation in x87 extended precision, not
18403 // in SSE. (The generic code can't know it's OK to do this, or how to.)
18404 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
18405 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
18406 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18407 MachineMemOperand::MOLoad, 8, 8);
18409 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
18410 SDValue Ops[] = { Store, StackSlot };
18411 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
18414 APInt FF(32, 0x5F800000ULL);
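// 0x5F800000 is the IEEE single-precision bit pattern of 2^64. It is added to
// the FILD result below when the sign bit of the i64 input was set, undoing
// the signed interpretation of the unsigned input.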
18416 // Check whether the sign bit is set.
18417 SDValue SignSet = DAG.getSetCC(
18418 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
18419 Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
18421 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
18422 SDValue FudgePtr = DAG.getConstantPool(
18423 ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
18425 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
18426 SDValue Zero = DAG.getIntPtrConstant(0, dl);
18427 SDValue Four = DAG.getIntPtrConstant(4, dl);
18428 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
18429 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
18431 // Load the value out, extending it from f32 to f80.
18432 // FIXME: Avoid the extend by constructing the right constant pool?
18433 SDValue Fudge = DAG.getExtLoad(
18434 ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
18435 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
18436 /* Alignment = */ 4);
18437 // Extend everything to 80 bits to force it to be done on x87.
18438 // TODO: Are there any fast-math-flags to propagate here?
18439 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
18440 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
18441 DAG.getIntPtrConstant(0, dl));
18444 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
18445 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
18446 // just return an SDValue().
18447 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
18448 // to i16, i32 or i64, and we lower it to a legal sequence and return the result.
18451 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
18452 bool IsSigned) const {
18455 EVT DstTy = Op.getValueType();
18456 EVT TheVT = Op.getOperand(0).getValueType();
18457 auto PtrVT = getPointerTy(DAG.getDataLayout());
18459 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
18460 // f16 must be promoted before using the lowering in this routine.
18461 // fp128 does not use this lowering.
18465 // If using FIST to compute an unsigned i64, we'll need some fixup
18466 // to handle values above the maximum signed i64. A FIST is always
18467 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
18468 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
18470 if (!IsSigned && DstTy != MVT::i64) {
18471 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
18472 // The low 32 bits of the fist result will have the correct uint32 result.
18473 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
18477 assert(DstTy.getSimpleVT() <= MVT::i64 &&
18478 DstTy.getSimpleVT() >= MVT::i16 &&
18479 "Unknown FP_TO_INT to lower!");
18481 // We lower FP->int64 into FISTP64 followed by a load from a temporary stack slot.
18483 MachineFunction &MF = DAG.getMachineFunction();
18484 unsigned MemSize = DstTy.getStoreSize();
18485 int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
18486 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18488 SDValue Chain = DAG.getEntryNode();
18489 SDValue Value = Op.getOperand(0);
18490 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
18492 if (UnsignedFixup) {
18494 // Conversion to unsigned i64 is implemented with a select,
18495 // depending on whether the source value fits in the range
18496 // of a signed i64. Let Thresh be the FP equivalent of
18497 // 0x8000000000000000ULL.
18499 // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
18500 // FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
18501 // Fist-to-mem64 FistSrc
18502 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
18503 // to XOR'ing the high 32 bits with Adjust.
18505 // Being a power of 2, Thresh is exactly representable in all FP formats.
18506 // For X87 we'd like to use the smallest FP type for this constant, but
18507 // for DAG type consistency we have to match the FP operand type.
18509 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
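// Note: 0x5f000000 is the single-precision encoding of 2^63, i.e. the FP
// equivalent of 0x8000000000000000ULL. Worked example: for an input of
// exactly 2^63, Value >= Thresh, so FistSrc becomes 0 and Adjust becomes the
// i64 sign mask; the FIST produces 0 and the final XOR restores
// 0x8000000000000000.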
18510 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
18511 bool LosesInfo = false;
18512 if (TheVT == MVT::f64)
18513 // The rounding mode is irrelevant as the conversion should be exact.
18514 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
18516 else if (TheVT == MVT::f80)
18517 Status = Thresh.convert(APFloat::x87DoubleExtended(),
18518 APFloat::rmNearestTiesToEven, &LosesInfo);
18520 assert(Status == APFloat::opOK && !LosesInfo &&
18521 "FP conversion should have been exact");
18523 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
18525 SDValue Cmp = DAG.getSetCC(DL,
18526 getSetCCResultType(DAG.getDataLayout(),
18527 *DAG.getContext(), TheVT),
18528 Value, ThreshVal, ISD::SETLT);
18529 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
18530 DAG.getConstant(0, DL, MVT::i64),
18531 DAG.getConstant(APInt::getSignMask(64),
18533 SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
18534 Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
18535 *DAG.getContext(), TheVT),
18536 Value, ThreshVal, ISD::SETLT);
18537 Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
18540 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
18542 // FIXME: This causes a redundant load/store if the SSE-class value is already
18543 // in memory, such as if it is on the call stack.
18544 if (isScalarFPTypeInSSEReg(TheVT)) {
18545 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
18546 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
18547 SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
18548 SDValue Ops[] = { Chain, StackSlot };
18550 unsigned FLDSize = TheVT.getStoreSize();
18551 assert(FLDSize <= MemSize && "Stack slot not big enough");
18552 MachineMemOperand *MMO = MF.getMachineMemOperand(
18553 MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
18554 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
18555 Chain = Value.getValue(1);
18558 // Build the FP_TO_INT*_IN_MEM
18559 MachineMemOperand *MMO = MF.getMachineMemOperand(
18560 MPI, MachineMemOperand::MOStore, MemSize, MemSize);
18561 SDValue Ops[] = { Chain, Value, StackSlot };
18562 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
18563 DAG.getVTList(MVT::Other),
18566 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
18568 // If we need an unsigned fixup, XOR the result with adjust.
18570 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
18575 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
18576 const X86Subtarget &Subtarget) {
18577 MVT VT = Op.getSimpleValueType();
18578 SDValue In = Op.getOperand(0);
18579 MVT InVT = In.getSimpleValueType();
18581 unsigned Opc = Op.getOpcode();
18583 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
18584 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
18585 "Unexpected extension opcode");
18586 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
18587 "Expected same number of elements");
18588 assert((VT.getVectorElementType() == MVT::i16 ||
18589 VT.getVectorElementType() == MVT::i32 ||
18590 VT.getVectorElementType() == MVT::i64) &&
18591 "Unexpected element type");
18592 assert((InVT.getVectorElementType() == MVT::i8 ||
18593 InVT.getVectorElementType() == MVT::i16 ||
18594 InVT.getVectorElementType() == MVT::i32) &&
18595 "Unexpected element type");
18597 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
18599 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
18600 if (InVT == MVT::v8i8) {
18601 if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
18604 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
18605 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
18606 return DAG.getNode(ExtendInVecOpc, dl, VT, In);
18609 if (Subtarget.hasInt256())
18612 // Optimize vectors in AVX mode:
18615 // Use vpmovzxwd for the 4 lower elements:  v8i16 -> v4i32.
18616 // Use vpunpckhwd for the 4 upper elements:  v8i16 -> v4i32.
18617 // Concat upper and lower parts.
18620 // Use vpmovzxdq for the 2 lower elements:  v4i32 -> v2i64.
18621 // Use vpunpckhdq for the 2 upper elements:  v4i32 -> v2i64.
18622 // Concat upper and lower parts.
18625 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
18626 VT.getVectorNumElements() / 2);
18628 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
18630 // Short-circuit if we can determine that each 128-bit half is the same value.
18631 // Otherwise, this is difficult to match and optimize.
18632 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
18633 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
18634 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
18636 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
18637 SDValue Undef = DAG.getUNDEF(InVT);
18638 bool NeedZero = Opc == ISD::ZERO_EXTEND;
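// Unpacking the high half of In against zero (for zero_extend) interleaves
// each source element with a zero element; reinterpreted at twice the element
// width this is exactly a zero-extension of the upper half. E.g. for v8i16,
// unpckhwd(In, 0) = {In[4],0,In[5],0,In[6],0,In[7],0}, which bitcast to v4i32
// is {zext(In[4]), ..., zext(In[7])}. For any_extend an undef vector is used
// instead, since the high bits don't matter.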
18639 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
18640 OpHi = DAG.getBitcast(HalfVT, OpHi);
18642 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
18645 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
18646 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
18647 const SDLoc &dl, SelectionDAG &DAG) {
18648 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
18649 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
18650 DAG.getIntPtrConstant(0, dl));
18651 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
18652 DAG.getIntPtrConstant(8, dl));
18653 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
18654 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
18655 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
18656 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
18659 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
18660 const X86Subtarget &Subtarget,
18661 SelectionDAG &DAG) {
18662 MVT VT = Op->getSimpleValueType(0);
18663 SDValue In = Op->getOperand(0);
18664 MVT InVT = In.getSimpleValueType();
18665 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
18667 unsigned NumElts = VT.getVectorNumElements();
18669 // For all vectors but vXi8 we can just emit a sign_extend and a shift. This
18670 // avoids a constant pool load.
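// E.g. for v8i1 -> v8i32: sign_extend produces 0 or -1 per lane, and a
// logical shift right by 31 turns that into the desired 0 or 1.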
18671 if (VT.getVectorElementType() != MVT::i8) {
18672 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
18673 return DAG.getNode(ISD::SRL, DL, VT, Extend,
18674 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
18677 // Extend VT if BWI is not supported.
18679 if (!Subtarget.hasBWI()) {
18680 // If v16i32 is to be avoided, we'll need to split and concatenate.
18681 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
18682 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
18684 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
18687 // Widen to 512-bits if VLX is not supported.
18688 MVT WideVT = ExtVT;
18689 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
18690 NumElts *= 512 / ExtVT.getSizeInBits();
18691 InVT = MVT::getVectorVT(MVT::i1, NumElts);
18692 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
18693 In, DAG.getIntPtrConstant(0, DL));
18694 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
18698 SDValue One = DAG.getConstant(1, DL, WideVT);
18699 SDValue Zero = DAG.getConstant(0, DL, WideVT);
18701 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
18703 // Truncate if we had to extend above.
18705 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
18706 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
18709 // Extract back to 128/256-bit if we widened.
18711 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
18712 DAG.getIntPtrConstant(0, DL));
18714 return SelectedVal;
18717 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
18718 SelectionDAG &DAG) {
18719 SDValue In = Op.getOperand(0);
18720 MVT SVT = In.getSimpleValueType();
18722 if (SVT.getVectorElementType() == MVT::i1)
18723 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
18725 assert(Subtarget.hasAVX() && "Expected AVX support");
18726 return LowerAVXExtend(Op, DAG, Subtarget);
18729 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
18730 /// It makes use of the fact that vectors with enough leading sign/zero bits
18731 /// prevent the PACKSS/PACKUS from saturating the results.
18732 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
18733 /// within each 128-bit lane.
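/// For example, a v8i32 -> v8i16 truncate can use PACKSSDW only when every
/// i32 lane already fits in i16 (at least 17 sign bits), because PACKSS
/// saturates out-of-range values; callers (e.g. LowerTRUNCATE below) verify
/// this with ComputeNumSignBits / computeKnownBits before using this helper.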
18734 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
18735 const SDLoc &DL, SelectionDAG &DAG,
18736 const X86Subtarget &Subtarget) {
18737 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
18738 "Unexpected PACK opcode");
18739 assert(DstVT.isVector() && "VT not a vector?");
18741 // Requires SSE2 but AVX512 has fast vector truncate.
18742 if (!Subtarget.hasSSE2())
18745 EVT SrcVT = In.getValueType();
18747 // No truncation required, we might get here due to recursive calls.
18748 if (SrcVT == DstVT)
18751 // We only support vector truncation to 64 bits or greater from a
18752 // 128 bits or greater source.
18753 unsigned DstSizeInBits = DstVT.getSizeInBits();
18754 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
18755 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
18758 unsigned NumElems = SrcVT.getVectorNumElements();
18759 if (!isPowerOf2_32(NumElems))
18762 LLVMContext &Ctx = *DAG.getContext();
18763 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
18764 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
18766 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
18768 // Pack to the largest type possible:
18769 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
18770 EVT InVT = MVT::i16, OutVT = MVT::i8;
18771 if (SrcVT.getScalarSizeInBits() > 16 &&
18772 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
18777 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
18778 if (SrcVT.is128BitVector()) {
18779 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
18780 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
18781 In = DAG.getBitcast(InVT, In);
18782 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
18783 Res = extractSubVector(Res, 0, DAG, DL, 64);
18784 return DAG.getBitcast(DstVT, Res);
18787 // Extract lower/upper subvectors.
18788 unsigned NumSubElts = NumElems / 2;
18789 SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
18790 SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
18792 unsigned SubSizeInBits = SrcSizeInBits / 2;
18793 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
18794 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
18796 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
18797 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
18798 Lo = DAG.getBitcast(InVT, Lo);
18799 Hi = DAG.getBitcast(InVT, Hi);
18800 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
18801 return DAG.getBitcast(DstVT, Res);
18804 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
18805 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
18806 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
18807 Lo = DAG.getBitcast(InVT, Lo);
18808 Hi = DAG.getBitcast(InVT, Hi);
18809 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
18811 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
18812 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
18813 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
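// E.g. when OutVT is v16i16, Scale = 64 / 16 = 4 and the 64-bit-element mask
// {0,2,1,3} expands to {0,1,2,3, 8,9,10,11, 4,5,6,7, 12,13,14,15}, swapping
// the two middle 64-bit chunks of the 256-bit result.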
18814 SmallVector<int, 64> Mask;
18815 int Scale = 64 / OutVT.getScalarSizeInBits();
18816 scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
18817 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
18819 if (DstVT.is256BitVector())
18820 return DAG.getBitcast(DstVT, Res);
18822 // If 512bit -> 128bit truncate another stage.
18823 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
18824 Res = DAG.getBitcast(PackedVT, Res);
18825 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
18828 // Recursively pack lower/upper subvectors, concat result and pack again.
18829 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
18830 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
18831 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
18832 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
18834 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
18835 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
18836 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
18839 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
18840 const X86Subtarget &Subtarget) {
18843 MVT VT = Op.getSimpleValueType();
18844 SDValue In = Op.getOperand(0);
18845 MVT InVT = In.getSimpleValueType();
18847 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
18849 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
18850 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
18851 if (InVT.getScalarSizeInBits() <= 16) {
18852 if (Subtarget.hasBWI()) {
18853 // legal, will go to VPMOVB2M, VPMOVW2M
18854 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
18855 // We need to shift to get the lsb into sign position.
18856 // Shifting packed bytes is not supported natively, so bitcast to words.
18857 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
18858 In = DAG.getNode(ISD::SHL, DL, ExtVT,
18859 DAG.getBitcast(ExtVT, In),
18860 DAG.getConstant(ShiftInx, DL, ExtVT));
18861 In = DAG.getBitcast(InVT, In);
18863 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
18866 // Use TESTD/Q, extending the vector to packed dword/qword.
18867 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
18868 "Unexpected vector type.");
18869 unsigned NumElts = InVT.getVectorNumElements();
18870 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
18871 // We need to change to a wider element type that we have support for.
18872 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
18873 // For 16 element vectors we extend to v16i32 unless we are explicitly
18874 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
18875 // we need to split into two 8 element vectors which we can extend to v8i32,
18876 // truncate and concat the results. There's an additional complication if
18877 // the original type is v16i8. In that case we can't split the v16i8 so
18878 // first we pre-extend it to v16i16 which we can split to v8i16, then extend
18879 // to v8i32, truncate that to v8i1 and concat the two halves.
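// I.e. for v16i8 the chain is: v16i8 --sext--> v16i16 --split--> 2 x v8i16
// --sext--> 2 x v8i32 --truncate--> 2 x v8i1 --concat--> v16i1.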
18880 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
18881 if (InVT == MVT::v16i8) {
18882 // First we need to sign extend up to 256-bits so we can split that.
18883 InVT = MVT::v16i16;
18884 In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
18886 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
18887 SDValue Hi = extract128BitVector(In, 8, DAG, DL);
18888 // We're split now, just emit two truncates and a concat. The two
18889 // truncates will trigger legalization to come back to this function.
18890 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
18891 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
18892 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
18894 // We either have 8 elements or we're allowed to use 512-bit vectors.
18895 // If we have VLX, we want to use the narrowest vector that can get the
18896 // job done so we use vXi32.
18897 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
18898 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
18899 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
18901 ShiftInx = InVT.getScalarSizeInBits() - 1;
18904 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
18905 // We need to shift to get the lsb into sign position.
18906 In = DAG.getNode(ISD::SHL, DL, InVT, In,
18907 DAG.getConstant(ShiftInx, DL, InVT));
18909 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
18910 if (Subtarget.hasDQI())
18911 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
18912 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
18915 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
18917 MVT VT = Op.getSimpleValueType();
18918 SDValue In = Op.getOperand(0);
18919 MVT InVT = In.getSimpleValueType();
18920 unsigned InNumEltBits = InVT.getScalarSizeInBits();
18922 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
18923 "Invalid TRUNCATE operation");
18925 // If called by the legalizer just return.
18926 if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
18929 if (VT.getVectorElementType() == MVT::i1)
18930 return LowerTruncateVecI1(Op, DAG, Subtarget);
18932 // vpmovqb/w/d, vpmovdb/w, vpmovwb
18933 if (Subtarget.hasAVX512()) {
18934 // Word to byte only under BWI. Otherwise we have to promote to v16i32
18935 // and then truncate that. But we should only do that if we haven't been
18936 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
18937 // handled by isel patterns.
18938 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
18939 Subtarget.canExtendTo512DQ())
18943 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
18944 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
18946 // Truncate with PACKUS if we are truncating a vector with leading zero bits
18947 // that extend all the way to the packed/truncated value.
18948 // Pre-SSE41 we can only use PACKUSWB.
18949 KnownBits Known = DAG.computeKnownBits(In);
18950 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
18952 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
18955 // Truncate with PACKSS if we are truncating a vector with sign-bits that
18956 // extend all the way to the packed/truncated value.
18957 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
18959 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
18962 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
18963 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
18964 if (Subtarget.hasInt256()) {
18965 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
18966 In = DAG.getBitcast(MVT::v8i32, In);
18967 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
18968 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
18969 DAG.getIntPtrConstant(0, DL));
18972 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
18973 DAG.getIntPtrConstant(0, DL));
18974 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
18975 DAG.getIntPtrConstant(2, DL));
18976 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
18977 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
18978 static const int ShufMask[] = {0, 2, 4, 6};
18979 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
18982 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
18983 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
18984 if (Subtarget.hasInt256()) {
18985 In = DAG.getBitcast(MVT::v32i8, In);
18987 // The PSHUFB mask:
18988 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
18989 -1, -1, -1, -1, -1, -1, -1, -1,
18990 16, 17, 20, 21, 24, 25, 28, 29,
18991 -1, -1, -1, -1, -1, -1, -1, -1 };
18992 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
18993 In = DAG.getBitcast(MVT::v4i64, In);
18995 static const int ShufMask2[] = {0, 2, -1, -1};
18996 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
18997 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
18998 DAG.getIntPtrConstant(0, DL));
18999 return DAG.getBitcast(VT, In);
19002 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
19003 DAG.getIntPtrConstant(0, DL));
19005 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
19006 DAG.getIntPtrConstant(4, DL));
19008 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
19009 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
19011 // The PSHUFB mask:
19012 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
19013 -1, -1, -1, -1, -1, -1, -1, -1};
19015 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
19016 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
19018 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19019 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19021 // The MOVLHPS Mask:
19022 static const int ShufMask2[] = {0, 1, 4, 5};
19023 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
19024 return DAG.getBitcast(MVT::v8i16, res);
19027 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
19028 // Use an AND to zero upper bits for PACKUS.
19029 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
19031 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19032 DAG.getIntPtrConstant(0, DL));
19033 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19034 DAG.getIntPtrConstant(8, DL));
19035 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
19038 // Handle truncation of V256 to V128 using shuffles.
19039 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19041 assert(Subtarget.hasAVX() && "256-bit vector without AVX!");
19043 unsigned NumElems = VT.getVectorNumElements();
19044 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
19046 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
19047 // Prepare truncation shuffle mask
19048 for (unsigned i = 0; i != NumElems; ++i)
19049 MaskVec[i] = i * 2;
19050 In = DAG.getBitcast(NVT, In);
19051 SDValue V = DAG.getVectorShuffle(NVT, DL, In, In, MaskVec);
19052 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
19053 DAG.getIntPtrConstant(0, DL));
19056 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
19057 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
19058 MVT VT = Op.getSimpleValueType();
19059 SDValue Src = Op.getOperand(0);
19060 MVT SrcVT = Src.getSimpleValueType();
19063 if (VT.isVector()) {
19064 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
19065 MVT ResVT = MVT::v4i32;
19066 MVT TruncVT = MVT::v4i1;
19067 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
19068 if (!IsSigned && !Subtarget.hasVLX()) {
19069 // Widen to 512-bits.
19070 ResVT = MVT::v8i32;
19071 TruncVT = MVT::v8i1;
19072 Opc = ISD::FP_TO_UINT;
19073 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
19074 DAG.getUNDEF(MVT::v8f64),
19075 Src, DAG.getIntPtrConstant(0, dl));
19077 SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
19078 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
19079 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
19080 DAG.getIntPtrConstant(0, dl));
19083 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
19084 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
19085 return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
19086 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
19087 DAG.getUNDEF(MVT::v2f32)));
19093 assert(!VT.isVector());
19095 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
19097 if (!IsSigned && Subtarget.hasAVX512()) {
19098 // Conversions from f32/f64 should be legal.
19102 // Use default expansion.
19103 if (VT == MVT::i64)
19108 // Promote i16 to i32 if we can use an SSE operation.
19108 if (VT == MVT::i16 && UseSSEReg) {
19109 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
19110 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
19111 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19114 // If this is an FP_TO_SINT using SSEReg we're done.
19115 if (UseSSEReg && IsSigned)
19118 // Fall back to X87.
19119 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
19122 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
19125 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
19127 MVT VT = Op.getSimpleValueType();
19128 SDValue In = Op.getOperand(0);
19129 MVT SVT = In.getSimpleValueType();
19131 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
19133 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
19134 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
19135 In, DAG.getUNDEF(SVT)));
19138 /// Horizontal vector math instructions may be slower than normal math with
19139 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19140 /// implementation, and likely shuffle complexity of the alternate sequence.
19141 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19142 const X86Subtarget &Subtarget) {
19143 bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
19144 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19145 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19148 /// Depending on uarch and/or optimizing for size, we might prefer to use a
19149 /// vector operation in place of the typical scalar operation.
19150 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
19151 const X86Subtarget &Subtarget) {
19152 // If both operands have other uses, this is probably not profitable.
19153 SDValue LHS = Op.getOperand(0);
19154 SDValue RHS = Op.getOperand(1);
19155 if (!LHS.hasOneUse() && !RHS.hasOneUse())
19158 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
19159 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
19160 if (IsFP && !Subtarget.hasSSE3())
19162 if (!IsFP && !Subtarget.hasSSSE3())
19165 // Extract from a common vector.
19166 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19167 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19168 LHS.getOperand(0) != RHS.getOperand(0) ||
19169 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
19170 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
19171 !shouldUseHorizontalOp(true, DAG, Subtarget))
19174 // Allow commuted 'hadd' ops.
19175 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
19177 switch (Op.getOpcode()) {
19178 case ISD::ADD: HOpcode = X86ISD::HADD; break;
19179 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
19180 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
19181 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
19183 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
19185 unsigned LExtIndex = LHS.getConstantOperandVal(1);
19186 unsigned RExtIndex = RHS.getConstantOperandVal(1);
19187 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
19188 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
19189 std::swap(LExtIndex, RExtIndex);
19191 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
19194 SDValue X = LHS.getOperand(0);
19195 EVT VecVT = X.getValueType();
19196 unsigned BitWidth = VecVT.getSizeInBits();
19197 unsigned NumLanes = BitWidth / 128;
19198 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
19199 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
19200 "Not expecting illegal vector widths here");
19202 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
19203 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
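// E.g. for add (extractelt (v8f32 X), 4), (extractelt X, 5): NumEltsPerLane
// is 4, so we extract the upper 128-bit half, reduce LExtIndex to 0, and the
// final extract below reads lane 0 of the 128-bit HADD.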
19205 if (BitWidth == 256 || BitWidth == 512) {
19206 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
19207 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
19208 LExtIndex %= NumEltsPerLane;
19211 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
19212 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
19213 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
19214 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
19215 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
19216 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
19217 DAG.getIntPtrConstant(LExtIndex / 2, DL));
19220 /// Depending on uarch and/or optimizing for size, we might prefer to use a
19221 /// vector operation in place of the typical scalar operation.
19222 static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
19223 const X86Subtarget &Subtarget) {
19224 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
19225 "Only expecting float/double");
19226 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
19229 /// The only differences between FABS and FNEG are the mask and the logic op.
19230 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
19231 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
19232 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
19233 "Wrong opcode for lowering FABS or FNEG.");
19235 bool IsFABS = (Op.getOpcode() == ISD::FABS);
19237 // If this is a FABS and it has an FNEG user, bail out to fold the combination
19238 // into an FNABS. We'll lower the FABS after that if it is still in use.
19240 for (SDNode *User : Op->uses())
19241 if (User->getOpcode() == ISD::FNEG)
19245 MVT VT = Op.getSimpleValueType();
19247 bool IsF128 = (VT == MVT::f128);
19248 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19249 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19250 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19251 "Unexpected type in LowerFABSorFNEG");
19253 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
19254 // decide if we should generate a 16-byte constant mask when we only need 4 or
19255 // 8 bytes for the scalar case.
19257 // There are no scalar bitwise logical SSE/AVX instructions, so we
19258 // generate a 16-byte vector constant and logic op even for the scalar case.
19259 // Using a 16-byte mask allows folding the load of the mask with
19260 // the logic op, so it can save (~4 bytes) on code size.
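// E.g. a scalar f64 FABS becomes an FAND with a v2f64 splat of
// 0x7FFFFFFFFFFFFFFF (clearing the sign bit), and an f64 FNEG becomes an
// FXOR with a splat of 0x8000000000000000 (flipping it).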
19261 bool IsFakeVector = !VT.isVector() && !IsF128;
19264 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19266 unsigned EltBits = VT.getScalarSizeInBits();
19267 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
19268 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
19269 APInt::getSignMask(EltBits);
19270 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19271 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
19273 SDValue Op0 = Op.getOperand(0);
19274 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
19275 unsigned LogicOp = IsFABS ? X86ISD::FAND :
19276 IsFNABS ? X86ISD::FOR :
19278 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
19280 if (VT.isVector() || IsF128)
19281 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19283 // For the scalar case extend to a 128-bit vector, perform the logic op,
19284 // and extract the scalar result back out.
19285 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
19286 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19287 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
19288 DAG.getIntPtrConstant(0, dl));
19291 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
19292 SDValue Mag = Op.getOperand(0);
19293 SDValue Sign = Op.getOperand(1);
19296 // If the sign operand is smaller, extend it first.
19297 MVT VT = Op.getSimpleValueType();
19298 if (Sign.getSimpleValueType().bitsLT(VT))
19299 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
19301 // And if it is bigger, shrink it first.
19302 if (Sign.getSimpleValueType().bitsGT(VT))
19303 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
19305 // At this point the operands and the result should have the same
19306 // type, and that won't be f80 since that is not custom lowered.
19307 bool IsF128 = (VT == MVT::f128);
19308 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19309 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19310 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19311 "Unexpected type in LowerFCOPYSIGN");
19313 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19315 // Perform all scalar logic operations as 16-byte vectors because there are no
19316 // scalar FP logic instructions in SSE.
19317 // TODO: This isn't necessary. If we used scalar types, we might avoid some
19318 // unnecessary splats, but we might miss load folding opportunities. Should
19319 // this decision be based on OptimizeForSize?
19320 bool IsFakeVector = !VT.isVector() && !IsF128;
19323 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19325 // The mask constants are automatically splatted for vector types.
19326 unsigned EltSizeInBits = VT.getScalarSizeInBits();
19327 SDValue SignMask = DAG.getConstantFP(
19328 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
19329 SDValue MagMask = DAG.getConstantFP(
19330 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
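// Worked example for f64: copysign(3.0, -0.0) computes
// SignBit = bits(-0.0) & 0x8000000000000000 = 0x8000000000000000 and
// MagBits = bits(3.0) & 0x7FFFFFFFFFFFFFFF = 0x4008000000000000;
// OR'ing them yields 0xC008000000000000, i.e. -3.0.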
19332 // First, clear all bits but the sign bit from the second operand (sign).
19334 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
19335 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
19337 // Next, clear the sign bit from the first operand (magnitude).
19338 // TODO: If we had general constant folding for FP logic ops, this check
19339 // wouldn't be necessary.
19341 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
19342 APFloat APF = Op0CN->getValueAPF();
19344 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
19346 // If the magnitude operand wasn't a constant, we need to AND out the sign.
19348 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
19349 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
19352 // OR the magnitude value with the sign bit.
19353 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
19354 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
19355 DAG.getIntPtrConstant(0, dl));
19358 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
19359 SDValue N0 = Op.getOperand(0);
19361 MVT VT = Op.getSimpleValueType();
19363 MVT OpVT = N0.getSimpleValueType();
19364 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
19365 "Unexpected type for FGETSIGN");
19367 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
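// MOVMSK gathers the sign bit of every vector element into the low bits of a
// GPR; since the scalar input sits in element 0, masking with 1 leaves
// exactly its sign bit.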
19368 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
19369 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
19370 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
19371 Res = DAG.getZExtOrTrunc(Res, dl, VT);
19372 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
19376 /// Helper for creating a X86ISD::SETCC node.
19377 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
19378 SelectionDAG &DAG) {
19379 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19380 DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
19383 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
19384 /// style scalarized (associative) reduction patterns.
19385 static bool matchBitOpReduction(SDValue Op, ISD::NodeType BinOp,
19386 SmallVectorImpl<SDValue> &SrcOps) {
19387 SmallVector<SDValue, 8> Opnds;
19388 DenseMap<SDValue, APInt> SrcOpMap;
19389 EVT VT = MVT::Other;
19391 // Recognize a special case where a vector is cast into a wide integer to test all 0s.
19393 assert(Op.getOpcode() == unsigned(BinOp) &&
19394 "Unexpected bit reduction opcode");
19395 Opnds.push_back(Op.getOperand(0));
19396 Opnds.push_back(Op.getOperand(1));
19398 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
19399 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
19400 // BFS traverse all BinOp operands.
19401 if (I->getOpcode() == unsigned(BinOp)) {
19402 Opnds.push_back(I->getOperand(0));
19403 Opnds.push_back(I->getOperand(1));
19404 // Re-evaluate the number of nodes to be traversed.
19405 e += 2; // 2 more nodes (LHS and RHS) are pushed.
19409 // Quit if this is not an EXTRACT_VECTOR_ELT.
19410 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19413 // Quit if the index is not a constant.
19414 SDValue Idx = I->getOperand(1);
19415 if (!isa<ConstantSDNode>(Idx))
19418 SDValue Src = I->getOperand(0);
19419 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
19420 if (M == SrcOpMap.end()) {
19421 VT = Src.getValueType();
19422 // Quit if not the same type.
19423 if (SrcOpMap.begin() != SrcOpMap.end() &&
19424 VT != SrcOpMap.begin()->first.getValueType())
19426 unsigned NumElts = VT.getVectorNumElements();
19427 APInt EltCount = APInt::getNullValue(NumElts);
19428 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
19429 SrcOps.push_back(Src);
19431 // Quit if element already used.
19432 unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
19433 if (M->second[CIdx])
19435 M->second.setBit(CIdx);
19438 // Quit if not all elements are used.
19439 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
19440 E = SrcOpMap.end();
19442 if (!I->second.isAllOnesValue())
19449 // Check whether an OR'd tree is PTEST-able.
19450 static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
19451 const X86Subtarget &Subtarget,
19452 SelectionDAG &DAG, SDValue &X86CC) {
19453 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
19455 if (!Subtarget.hasSSE41() || !Op->hasOneUse())
19458 SmallVector<SDValue, 8> VecIns;
19459 if (!matchBitOpReduction(Op, ISD::OR, VecIns))
19462 // Quit if not 128/256-bit vector.
19463 EVT VT = VecIns[0].getValueType();
19464 if (!VT.is128BitVector() && !VT.is256BitVector())
19468 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
19470 // Cast all vectors into TestVT for PTEST.
19471 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
19472 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
19474 // If more than one full vector is evaluated, OR them first before PTEST.
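// E.g. with three source vectors V0, V1, V2 this appends OR(V0,V1) and then
// OR(V2, OR(V0,V1)); VecIns.back() ends up as the OR of all of them.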
19475 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
19476 // Each iteration will OR 2 nodes and append the result until there is only
19477 // 1 node left, i.e. the final OR'd value of all vectors.
19478 SDValue LHS = VecIns[Slot];
19479 SDValue RHS = VecIns[Slot + 1];
19480 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
19483 X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, DL,
19485 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
19488 /// Return true if \c Op has a use that doesn't just read flags.
19489 static bool hasNonFlagsUse(SDValue Op) {
19490 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
19492 SDNode *User = *UI;
19493 unsigned UOpNo = UI.getOperandNo();
19494 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
19495 // Look past the truncate.
19496 UOpNo = User->use_begin().getOperandNo();
19497 User = *User->use_begin();
19500 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
19501 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
19507 /// Emit nodes that will be selected as "test Op0,Op0", or something
19509 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
19510 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
19511 // CF and OF aren't always set the way we want. Determine which
19512 // of these we need.
19513 bool NeedCF = false;
19514 bool NeedOF = false;
19517 case X86::COND_A: case X86::COND_AE:
19518 case X86::COND_B: case X86::COND_BE:
19521 case X86::COND_G: case X86::COND_GE:
19522 case X86::COND_L: case X86::COND_LE:
19523 case X86::COND_O: case X86::COND_NO: {
19524 // Check if we really need to set the
19525 // Overflow flag. If NoSignedWrap is present
19526 // that is not actually needed.
19527 switch (Op->getOpcode()) {
19532 if (Op.getNode()->getFlags().hasNoSignedWrap())
19542 // See if we can use the EFLAGS value from the operand instead of
19543 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
19544 // we prove that the arithmetic won't overflow, we can't use OF or CF.
19545 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
19546 // Emit a CMP with 0, which is the TEST pattern.
19547 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
19548 DAG.getConstant(0, dl, Op.getValueType()));
19550 unsigned Opcode = 0;
19551 unsigned NumOperands = 0;
19553 SDValue ArithOp = Op;
19555 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
19556 // which may be the result of a CAST. We use the variable 'Op', which is the
19557 // non-casted variable when we check for possible users.
19558 switch (ArithOp.getOpcode()) {
19560 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
19561 // because a TEST instruction will be better.
19562 if (!hasNonFlagsUse(Op))
19570 // Transform to an x86-specific ALU node with flags if there is a chance of
19571 // using an RMW op or only the flags are used. Otherwise, leave
19572 // the node alone and emit a 'test' instruction.
19573 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
19574 UE = Op.getNode()->use_end(); UI != UE; ++UI)
19575 if (UI->getOpcode() != ISD::CopyToReg &&
19576 UI->getOpcode() != ISD::SETCC &&
19577 UI->getOpcode() != ISD::STORE)
19580 // Otherwise use a regular EFLAGS-setting instruction.
19581 switch (ArithOp.getOpcode()) {
19582 default: llvm_unreachable("unexpected operator!");
19583 case ISD::ADD: Opcode = X86ISD::ADD; break;
19584 case ISD::SUB: Opcode = X86ISD::SUB; break;
19585 case ISD::XOR: Opcode = X86ISD::XOR; break;
19586 case ISD::AND: Opcode = X86ISD::AND; break;
19587 case ISD::OR: Opcode = X86ISD::OR; break;
19597 return SDValue(Op.getNode(), 1);
19604 // Emit a CMP with 0, which is the TEST pattern.
19605 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
19606 DAG.getConstant(0, dl, Op.getValueType()));
19608 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
19609 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
19611 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
19612 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
19613 return SDValue(New.getNode(), 1);
19616 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
19618 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
19619 const SDLoc &dl, SelectionDAG &DAG) const {
19620 if (isNullConstant(Op1))
19621 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
19623 EVT CmpVT = Op0.getValueType();
19625 if (CmpVT.isFloatingPoint())
19626 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
19628 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
19629 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
19631 // Only promote the compare up to i32 if it is a 16-bit operation
19632 // with an immediate. 16-bit immediates are to be avoided.
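// (16-bit immediates need an operand-size prefix and, on many Intel cores,
// a compare with a 16-bit immediate incurs a length-changing-prefix decode
// stall, so widening to 32 bits is usually cheaper.)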
19633 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
19634 !DAG.getMachineFunction().getFunction().hasMinSize()) {
19635 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
19636 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
19637 // Don't do this if the immediate can fit in 8-bits.
19638 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
19639 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
19640 unsigned ExtendOp =
19641 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
19642 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
19643 // For equality comparisons try to use SIGN_EXTEND if the input was
19644 // truncated from something with enough sign bits.
19645 if (Op0.getOpcode() == ISD::TRUNCATE) {
19646 SDValue In = Op0.getOperand(0);
19648 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
19650 ExtendOp = ISD::SIGN_EXTEND;
19651 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
19652 SDValue In = Op1.getOperand(0);
19654 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
19656 ExtendOp = ISD::SIGN_EXTEND;
19661 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
19662 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
19665 // Use SUB instead of CMP to enable CSE between SUB and CMP.
19666 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
19667 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
19668 return Sub.getValue(1);
19671 /// Convert a comparison if required by the subtarget.
19672 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
19673 SelectionDAG &DAG) const {
19674 // If the subtarget does not support the FUCOMI instruction, floating-point
19675 // comparisons have to be converted.
19676 if (Subtarget.hasCMov() ||
19677 Cmp.getOpcode() != X86ISD::CMP ||
19678 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
19679 !Cmp.getOperand(1).getValueType().isFloatingPoint())
19682 // The instruction selector will select an FUCOM instruction instead of
19683 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
19684 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
19685 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
19687 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
19688 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
19689 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
19690 DAG.getConstant(8, dl, MVT::i8));
19691 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
19693 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
19694 assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
19695 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
19698 /// Check if replacement of SQRT with RSQRT should be disabled.
19699 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
19700 EVT VT = Op.getValueType();
19702 // We never want to use both SQRT and RSQRT instructions for the same input.
19703 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
19707 return Subtarget.hasFastVectorFSQRT();
19708 return Subtarget.hasFastScalarFSQRT();
19711 /// The minimum architected relative accuracy is 2^-12. We need one
19712 /// Newton-Raphson step to have a good float result (24 bits of precision).
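/// (The refinement itself is emitted by the generic DAG combiner; each
/// Newton-Raphson step computes roughly x1 = x0 * (1.5 - 0.5 * a * x0 * x0),
/// about doubling the number of correct bits, so 12 -> ~24 for f32.)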
19713 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
19714 SelectionDAG &DAG, int Enabled,
19715 int &RefinementSteps,
19716 bool &UseOneConstNR,
19717 bool Reciprocal) const {
19718 EVT VT = Op.getValueType();
19720 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
19721 // It is likely not profitable to do this for f64 because a double-precision
19722 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
19723 // instructions: convert to single, rsqrtss, convert back to double, refine
19724 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
19725 // along with FMA, this could be a throughput win.
19726 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
19727 // after legalize types.
19728 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
19729 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
19730 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
19731 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
19732 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
19733 if (RefinementSteps == ReciprocalEstimate::Unspecified)
19734 RefinementSteps = 1;
19736 UseOneConstNR = false;
19737 // There is no FRSQRT for 512-bits, but there is RSQRT14.
19738 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
19739 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
19744 /// The minimum architected relative accuracy is 2^-12. We need one
19745 /// Newton-Raphson step to have a good float result (24 bits of precision).
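/// (For the reciprocal, each refinement step computes x1 = x0 * (2 - a * x0),
/// likewise roughly doubling the bits of precision per iteration.)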
19746 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
19748 int &RefinementSteps) const {
19749 EVT VT = Op.getValueType();
19751 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
19752 // It is likely not profitable to do this for f64 because a double-precision
19753 // reciprocal estimate with refinement on x86 prior to FMA requires
19754 // 15 instructions: convert to single, rcpss, convert back to double, refine
19755 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
19756 // along with FMA, this could be a throughput win.
19758 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
19759 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
19760 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
19761 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
19762 // Enable estimate codegen with 1 refinement step for vector division.
19763 // Scalar division estimates are disabled because they break too much
19764 // real-world code. These defaults are intended to match GCC behavior.
19765 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
19768 if (RefinementSteps == ReciprocalEstimate::Unspecified)
19769 RefinementSteps = 1;
19771 // There is no FRCP for 512-bits, but there is RCP14.
19772 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
19773 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
19778 /// If we have at least two divisions that use the same divisor, convert to
19779 /// multiplication by a reciprocal. This may need to be adjusted for a given
19780 /// CPU if a division's cost is not at least twice the cost of a multiplication.
19781 /// This is because we still need one division to calculate the reciprocal and
19782 /// then we need two multiplies by that reciprocal as replacements for the
19783 /// original divisions.
19784 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
19788 /// Result of 'and' is compared against zero. Change to a BT node if possible.
19789 /// Returns the BT node and the condition code needed to use it.
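/// E.g. (and X, (shl 1, N)) != 0 becomes (BT X, N) with condition B (CF set),
/// and the == 0 form uses condition AE (CF clear).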
19790 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
19791 const SDLoc &dl, SelectionDAG &DAG,
19793 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
19794 SDValue Op0 = And.getOperand(0);
19795 SDValue Op1 = And.getOperand(1);
19796 if (Op0.getOpcode() == ISD::TRUNCATE)
19797 Op0 = Op0.getOperand(0);
19798 if (Op1.getOpcode() == ISD::TRUNCATE)
19799 Op1 = Op1.getOperand(0);
19801 SDValue Src, BitNo;
19802 if (Op1.getOpcode() == ISD::SHL)
19803 std::swap(Op0, Op1);
19804 if (Op0.getOpcode() == ISD::SHL) {
19805 if (isOneConstant(Op0.getOperand(0))) {
19806 // If we looked past a truncate, check that it's only truncating away known zeros.
19808 unsigned BitWidth = Op0.getValueSizeInBits();
19809 unsigned AndBitWidth = And.getValueSizeInBits();
19810 if (BitWidth > AndBitWidth) {
19811 KnownBits Known = DAG.computeKnownBits(Op0);
19812 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
19816 BitNo = Op0.getOperand(1);
19818 } else if (Op1.getOpcode() == ISD::Constant) {
19819 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
19820 uint64_t AndRHSVal = AndRHS->getZExtValue();
19821 SDValue AndLHS = Op0;
19823 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
19824 Src = AndLHS.getOperand(0);
19825 BitNo = AndLHS.getOperand(1);
19827 // Use BT if the immediate can't be encoded in a TEST instruction or we
19828 // are optimizing for size and the immediate won't fit in a byte.
19829 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
19830 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
19831 isPowerOf2_64(AndRHSVal)) {
19833 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
19834 Src.getValueType());
19839 // No patterns found, give up.
19840 if (!Src.getNode())
19843 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
19844 // instruction. Since the shift amount is in-range-or-undefined, we know
19845 // that doing a bittest on the i32 value is ok. We extend to i32 because
19846 // the encoding for the i16 version is larger than the i32 version.
19847 // Also promote i16 to i32 for performance / code size reasons.
19848 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
19849 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
19851 // See if we can use the 32-bit instruction instead of the 64-bit one for a
19852 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
19853 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
19854 // known to be zero.
19855 if (Src.getValueType() == MVT::i64 &&
19856 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
19857 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
19859 // If the operand types disagree, extend the shift amount to match. Since
19860 // BT ignores high bits (like shifts) we can use anyextend.
19861 if (Src.getValueType() != BitNo.getValueType())
19862 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
19864 X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
19866 return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
19869 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask CMPs.
19871 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
19876 // SSE Condition code mapping:
19885 switch (SetCCOpcode) {
19886 default: llvm_unreachable("Unexpected SETCC condition");
19888 case ISD::SETEQ: SSECC = 0; break;
19890 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
19892 case ISD::SETOLT: SSECC = 1; break;
19894 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
19896 case ISD::SETOLE: SSECC = 2; break;
19897 case ISD::SETUO: SSECC = 3; break;
19899 case ISD::SETNE: SSECC = 4; break;
19900 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
19901 case ISD::SETUGE: SSECC = 5; break;
19902 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
19903 case ISD::SETUGT: SSECC = 6; break;
19904 case ISD::SETO: SSECC = 7; break;
19905 case ISD::SETUEQ: SSECC = 8; break;
19906 case ISD::SETONE: SSECC = 12; break;
19909 std::swap(Op0, Op1);
19914 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
19915 /// concatenate the result back.
19916 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
19917 MVT VT = Op.getSimpleValueType();
19919 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
19920 "Unsupported value type for operation");
19922 unsigned NumElems = VT.getVectorNumElements();
19924 SDValue CC = Op.getOperand(2);
19926 // Extract the LHS vectors
19927 SDValue LHS = Op.getOperand(0);
19928 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
19929 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
19931 // Extract the RHS vectors
19932 SDValue RHS = Op.getOperand(1);
19933 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
19934 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
19936 // Issue the operation on the smaller types and concatenate the result back
19937 MVT EltVT = VT.getVectorElementType();
19938 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19939 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
19940 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
19941 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
19944 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
19946 SDValue Op0 = Op.getOperand(0);
19947 SDValue Op1 = Op.getOperand(1);
19948 SDValue CC = Op.getOperand(2);
19949 MVT VT = Op.getSimpleValueType();
19952 assert(VT.getVectorElementType() == MVT::i1 &&
19953 "Cannot set masked compare for this operation");
19955 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
19957 // If this is a seteq make sure any build vectors of all zeros are on the RHS.
19958 // This helps with vptestm matching.
19959 // TODO: Should we just canonicalize the setcc during DAG combine?
19960 if ((SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE) &&
19961 ISD::isBuildVectorAllZeros(Op0.getNode()))
19962 std::swap(Op0, Op1);
19964 // Prefer SETGT over SETLT.
19965 if (SetCCOpcode == ISD::SETLT) {
19966 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
19967 std::swap(Op0, Op1);
19970 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
19973 /// Given a buildvector constant, return a new vector constant with each element
19974 /// incremented or decremented. If incrementing or decrementing would result in
19975 /// unsigned overflow or underflow or this is not a simple vector constant,
19976 /// return an empty value.
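/// For example (illustrative): incrementing <1, 2, 3, 4> yields <2, 3, 4, 5>,
/// while incrementing <1, 255> for an i8 element type returns an empty value
/// because the second lane would overflow.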
19977 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
19978 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
19982 MVT VT = V.getSimpleValueType();
19983 MVT EltVT = VT.getVectorElementType();
19984 unsigned NumElts = VT.getVectorNumElements();
19985 SmallVector<SDValue, 8> NewVecC;
19987 for (unsigned i = 0; i < NumElts; ++i) {
19988 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
19989 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
19992 // Avoid overflow/underflow.
19993 const APInt &EltC = Elt->getAPIntValue();
19994 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
19997 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
20000 return DAG.getBuildVector(VT, DL, NewVecC);
20003 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for Op0 u<= Op1:
20005 /// t = psubus Op0, Op1
20006 /// pcmpeq t, <0..0>
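/// This works because the unsigned saturating subtract is zero in a lane
/// exactly when Op0 u<= Op1 in that lane, so comparing the result against an
/// all-zeros vector reproduces the original predicate without any sign flip.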
20007 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
20008 ISD::CondCode Cond, const SDLoc &dl,
20009 const X86Subtarget &Subtarget,
20010 SelectionDAG &DAG) {
20011 if (!Subtarget.hasSSE2())
20014 MVT VET = VT.getVectorElementType();
20015 if (VET != MVT::i8 && VET != MVT::i16)
20021 case ISD::SETULT: {
20022 // If the comparison is against a constant we can turn this into a
20023 // setule. With psubus, setule does not require a swap. This is
20024 // beneficial because the constant in the register is no longer
20025     // clobbered as the destination operand, so it can be hoisted out of a loop.
20026 // Only do this pre-AVX since vpcmp* is no longer destructive.
20027 if (Subtarget.hasAVX())
20029 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, false);
20035 case ISD::SETUGT: {
20036 // If the comparison is against a constant, we can turn this into a setuge.
20037 // This is beneficial because materializing a constant 0 for the PCMPEQ is
20038 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
20039 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
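    // For example (illustrative): X u> 5 becomes X u>= 6, i.e.
    // usubsat(6, X) == 0, since the saturating subtract is zero exactly when
    // 6 u<= X.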
20040 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, true);
20047 // Psubus is better than flip-sign because it requires no inversion.
20049 std::swap(Op0, Op1);
20055 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
20056 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
20057 DAG.getConstant(0, dl, VT));
20060 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
20061 SelectionDAG &DAG) {
20062 SDValue Op0 = Op.getOperand(0);
20063 SDValue Op1 = Op.getOperand(1);
20064 SDValue CC = Op.getOperand(2);
20065 MVT VT = Op.getSimpleValueType();
20066 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
20067 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
20072 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
20073 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
20077 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
20078 assert(VT.getVectorNumElements() <= 16);
20079 Opc = X86ISD::CMPM;
20081 Opc = X86ISD::CMPP;
20082 // The SSE/AVX packed FP comparison nodes are defined with a
20083 // floating-point vector result that matches the operand type. This allows
20084 // them to work with an SSE1 target (integer vector types are not legal).
20085 VT = Op0.getSimpleValueType();
20088 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
20089 // emit two comparisons and a logic op to tie them together.
20091 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
20092 if (SSECC >= 8 && !Subtarget.hasAVX()) {
20093 // LLVM predicate is SETUEQ or SETONE.
20095 unsigned CombineOpc;
20096 if (Cond == ISD::SETUEQ) {
20099 CombineOpc = X86ISD::FOR;
20101 assert(Cond == ISD::SETONE);
20104 CombineOpc = X86ISD::FAND;
20107 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20108 DAG.getConstant(CC0, dl, MVT::i8));
20109 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20110 DAG.getConstant(CC1, dl, MVT::i8));
20111 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
20113 // Handle all other FP comparisons here.
20114 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
20115 DAG.getConstant(SSECC, dl, MVT::i8));
20118 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
20119 // result type of SETCC. The bitcast is expected to be optimized away
20120 // during combining/isel.
20121 if (Opc == X86ISD::CMPP)
20122 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
20127 MVT VTOp0 = Op0.getSimpleValueType();
20128 assert(VTOp0 == Op1.getSimpleValueType() &&
20129 "Expected operands with same type!");
20130 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
20131 "Invalid number of packed elements for source and destination!");
20133 // This is being called by type legalization because v2i32 is marked custom
20134 // for result type legalization for v2f32.
20135 if (VTOp0 == MVT::v2i32)
20138 // The non-AVX512 code below works under the assumption that source and
20139 // destination types are the same.
20140 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
20141 "Value types for source and destination must be the same!");
20143 // The result is boolean, but operands are int/float
20144 if (VT.getVectorElementType() == MVT::i1) {
20145     // In the AVX-512 architecture, setcc returns a mask with i1 elements,
20146     // but there is no compare instruction for i8 and i16 elements in KNL.
20147 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
20148 "Unexpected operand type");
20149 return LowerIntVSETCC_AVX512(Op, DAG);
20152 // Lower using XOP integer comparisons.
20153 if (VT.is128BitVector() && Subtarget.hasXOP()) {
20154 // Translate compare code to XOP PCOM compare mode.
20155 unsigned CmpMode = 0;
20157 default: llvm_unreachable("Unexpected SETCC condition");
20159 case ISD::SETLT: CmpMode = 0x00; break;
20161 case ISD::SETLE: CmpMode = 0x01; break;
20163 case ISD::SETGT: CmpMode = 0x02; break;
20165 case ISD::SETGE: CmpMode = 0x03; break;
20166 case ISD::SETEQ: CmpMode = 0x04; break;
20167 case ISD::SETNE: CmpMode = 0x05; break;
20170 // Are we comparing unsigned or signed integers?
20172 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
20174 return DAG.getNode(Opc, dl, VT, Op0, Op1,
20175 DAG.getConstant(CmpMode, dl, MVT::i8));
20178 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
20179 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
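  // With Y a single set bit, X & Y can only be 0 or Y, so (X & Y) != 0 is the
  // same as (X & Y) == Y; the equality form avoids having to invert the
  // PCMPEQ-against-zero result.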
20180 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
20181 SDValue BC0 = peekThroughBitcasts(Op0);
20182 if (BC0.getOpcode() == ISD::AND) {
20184 SmallVector<APInt, 64> EltBits;
20185 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
20186 VT.getScalarSizeInBits(), UndefElts,
20187 EltBits, false, false)) {
20188 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
20190 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
20196 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
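  // For example (illustrative): for v16i8 and C = 0x10, we shift left by
  // 8 - 4 - 1 = 3 so the tested bit lands in the sign bit, then an arithmetic
  // shift right by 7 broadcasts it, giving an all-ones lane exactly where
  // (X & 0x10) == 0x10.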
20197 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
20198 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
20199 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
20200 if (C1 && C1->getAPIntValue().isPowerOf2()) {
20201 unsigned BitWidth = VT.getScalarSizeInBits();
20202 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
20204 SDValue Result = Op0.getOperand(0);
20205 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
20206 DAG.getConstant(ShiftAmt, dl, VT));
20207 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
20208 DAG.getConstant(BitWidth - 1, dl, VT));
20213 // Break 256-bit integer vector compare into smaller ones.
20214 if (VT.is256BitVector() && !Subtarget.hasInt256())
20215 return Lower256IntVSETCC(Op, DAG);
20217 // If this is a SETNE against the signed minimum value, change it to SETGT.
20218   // If this is a SETNE against the signed maximum value, change it to SETLT,
20219 // which will be swapped to SETGT.
20220 // Otherwise we use PCMPEQ+invert.
20222 if (Cond == ISD::SETNE &&
20223 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
20224 if (ConstValue.isMinSignedValue())
20226 else if (ConstValue.isMaxSignedValue())
20230 // If both operands are known non-negative, then an unsigned compare is the
20231 // same as a signed compare and there's no need to flip signbits.
20232 // TODO: We could check for more general simplifications here since we're
20233 // computing known bits.
20234 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
20235 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
20237 // Special case: Use min/max operations for unsigned compares.
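  // The idea: X u<= Y is equivalent to umin(X, Y) == X, so an unsigned min
  // (or max for u>=) followed by PCMPEQ gives the result directly, with no
  // sign-bit flipping and no inversion.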
20238 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20239 if (ISD::isUnsignedIntSetCC(Cond) &&
20240 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
20241 TLI.isOperationLegal(ISD::UMIN, VT)) {
20242 // If we have a constant operand, increment/decrement it and change the
20243 // condition to avoid an invert.
20244 if (Cond == ISD::SETUGT &&
20245 ISD::matchUnaryPredicate(Op1, [](ConstantSDNode *C) {
20246 return !C->getAPIntValue().isMaxValue();
20248 // X > C --> X >= (C+1) --> X == umax(X, C+1)
20249 Op1 = DAG.getNode(ISD::ADD, dl, VT, Op1, DAG.getConstant(1, dl, VT));
20250 Cond = ISD::SETUGE;
20252 if (Cond == ISD::SETULT &&
20253 ISD::matchUnaryPredicate(Op1, [](ConstantSDNode *C) {
20254 return !C->getAPIntValue().isNullValue();
20256 // X < C --> X <= (C-1) --> X == umin(X, C-1)
20257 Op1 = DAG.getNode(ISD::SUB, dl, VT, Op1, DAG.getConstant(1, dl, VT));
20258 Cond = ISD::SETULE;
20260 bool Invert = false;
20263 default: llvm_unreachable("Unexpected condition code");
20264 case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
20265 case ISD::SETULE: Opc = ISD::UMIN; break;
20266 case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
20267 case ISD::SETUGE: Opc = ISD::UMAX; break;
20270 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20271 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
20273 // If the logical-not of the result is required, perform that now.
20275 Result = DAG.getNOT(dl, Result, VT);
20280 // Try to use SUBUS and PCMPEQ.
20281 if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
20284 // We are handling one of the integer comparisons here. Since SSE only has
20285 // GT and EQ comparisons for integer, swapping operands and multiple
20286 // operations may be required for some comparisons.
20287 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
20289 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
20290 Cond == ISD::SETGE || Cond == ISD::SETUGE;
20291 bool Invert = Cond == ISD::SETNE ||
20292 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
20295 std::swap(Op0, Op1);
20297 // Check that the operation in question is available (most are plain SSE2,
20298 // but PCMPGTQ and PCMPEQQ have different requirements).
20299 if (VT == MVT::v2i64) {
20300 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
20301 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
20303 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20304 // bits of the inputs before performing those operations. The lower
20305 // compare is always unsigned.
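      // The trick (illustrative): for 32-bit lanes, x u< y is equivalent to
      // (x ^ 0x80000000) s< (y ^ 0x80000000), so flipping the sign bit of the
      // low dwords lets the signed PCMPGTD act as an unsigned compare; the
      // high dwords only need the flip when the 64-bit compare is unsigned.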
20308 SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
20310 SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
20312 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
20313 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
20315 // Cast everything to the right type.
20316 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20317 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20319 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
20320 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
20321 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
20323 // Create masks for only the low parts/high parts of the 64 bit integers.
20324 static const int MaskHi[] = { 1, 1, 3, 3 };
20325 static const int MaskLo[] = { 0, 0, 2, 2 };
20326 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
20327 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
20328 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
20330 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
20331 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
20334 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20336 return DAG.getBitcast(VT, Result);
20339 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
20340 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
20341 // pcmpeqd + pshufd + pand.
20342 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
20344 // First cast everything to the right type.
20345 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20346 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20349 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
20351 // Make sure the lower and upper halves are both all-ones.
20352 static const int Mask[] = { 1, 0, 3, 2 };
20353 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
20354 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
20357 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20359 return DAG.getBitcast(VT, Result);
20363 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20364 // bits of the inputs before performing those operations.
20366 MVT EltVT = VT.getVectorElementType();
20367 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
20369 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
20370 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
20373 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20375 // If the logical-not of the result is required, perform that now.
20377 Result = DAG.getNOT(dl, Result, VT);
20382 // Try to select this as a KORTEST+SETCC if possible.
20383 static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
20384 const SDLoc &dl, SelectionDAG &DAG,
20385 const X86Subtarget &Subtarget,
20387 // Only support equality comparisons.
20388 if (CC != ISD::SETEQ && CC != ISD::SETNE)
20391 // Must be a bitcast from vXi1.
20392 if (Op0.getOpcode() != ISD::BITCAST)
20395 Op0 = Op0.getOperand(0);
20396 MVT VT = Op0.getSimpleValueType();
20397 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
20398 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
20399 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
20402 X86::CondCode X86Cond;
20403 if (isNullConstant(Op1)) {
20404 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
20405 } else if (isAllOnesConstant(Op1)) {
20406 // C flag is set for all ones.
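    // (KORTEST sets ZF when the OR of the two mask operands is zero and CF
    // when the OR is all ones, which is what makes both the zero and the
    // all-ones comparisons selectable here.)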
20407 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
20411   // If the input is an OR, we can combine its operands into the KORTEST.
20414 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
20415 LHS = Op0.getOperand(0);
20416 RHS = Op0.getOperand(1);
20419 X86CC = DAG.getConstant(X86Cond, dl, MVT::i8);
20420 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
20423 /// Emit flags for the given setcc condition and operands. Also returns the
20424 /// corresponding X86 condition code constant in X86CC.
20425 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
20426 ISD::CondCode CC, const SDLoc &dl,
20428 SDValue &X86CC) const {
20429 // Optimize to BT if possible.
20430 // Lower (X & (1 << N)) == 0 to BT(X, N).
20431 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
20432 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
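  // BT copies the selected bit into CF, so equality against zero maps to
  // COND_AE (CF == 0) and inequality to COND_B (CF == 1); see LowerAndToBT.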
20433 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
20434 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20435 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
20439 // Try to use PTEST for a tree ORs equality compared with 0.
20440 // TODO: We could do AND tree with all 1s as well by using the C flag.
20441 if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
20442 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20443 if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
20447 // Try to lower using KORTEST.
20448 if (SDValue KORTEST = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
20451   // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
20453 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
20454 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20455 // If the input is a setcc, then reuse the input setcc or use a new one with
20456 // the inverted condition.
20457 if (Op0.getOpcode() == X86ISD::SETCC) {
20458 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
20460 X86CC = Op0.getOperand(0);
20462 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
20463 CCode = X86::GetOppositeBranchCondition(CCode);
20464 X86CC = DAG.getConstant(CCode, dl, MVT::i8);
20467 return Op0.getOperand(1);
20471 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
20472 X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
20473 if (CondCode == X86::COND_INVALID)
20476 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
20477 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
20478 X86CC = DAG.getConstant(CondCode, dl, MVT::i8);
20482 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
20484 MVT VT = Op.getSimpleValueType();
20486 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
20488 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
20489 SDValue Op0 = Op.getOperand(0);
20490 SDValue Op1 = Op.getOperand(1);
20492 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
20495 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
20499 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
20502 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
20503 SDValue LHS = Op.getOperand(0);
20504 SDValue RHS = Op.getOperand(1);
20505 SDValue Carry = Op.getOperand(2);
20506 SDValue Cond = Op.getOperand(3);
20509 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
20510 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
20512 // Recreate the carry if needed.
20513 EVT CarryVT = Carry.getValueType();
20514 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
20515 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
20516 Carry, DAG.getConstant(NegOne, DL, CarryVT));
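  // Adding all-ones to the materialized carry sets the hardware carry flag
  // exactly when the incoming carry bit was 1, so the SBB below can consume
  // it as a borrow.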
20518 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
20519 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
20520 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
20523 // This function returns three things: the arithmetic computation itself
20524 // (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
20525 // flag and the condition code define the case in which the arithmetic
20526 // computation overflows.
20527 static std::pair<SDValue, SDValue>
20528 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
20529 assert(Op.getResNo() == 0 && "Unexpected result number!");
20530 SDValue Value, Overflow;
20531 SDValue LHS = Op.getOperand(0);
20532 SDValue RHS = Op.getOperand(1);
20533 unsigned BaseOp = 0;
20535 switch (Op.getOpcode()) {
20536 default: llvm_unreachable("Unknown ovf instruction!");
20538 BaseOp = X86ISD::ADD;
20539 Cond = X86::COND_O;
20542 BaseOp = X86ISD::ADD;
20543 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
20546 BaseOp = X86ISD::SUB;
20547 Cond = X86::COND_O;
20550 BaseOp = X86ISD::SUB;
20551 Cond = X86::COND_B;
20554 BaseOp = X86ISD::SMUL;
20555 Cond = X86::COND_O;
20558 BaseOp = X86ISD::UMUL;
20559 Cond = X86::COND_O;
20564 // Also sets EFLAGS.
20565 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20566 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
20567 Overflow = Value.getValue(1);
20570 return std::make_pair(Value, Overflow);
20573 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
20574 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
20575 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
20576 // looks for this combo and may remove the "setcc" instruction if the "setcc"
20577 // has only one use.
20579 X86::CondCode Cond;
20580 SDValue Value, Overflow;
20581 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
20583 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
20584 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
20585 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
20588 /// Return true if opcode is a X86 logical comparison.
20589 static bool isX86LogicalCmp(SDValue Op) {
20590 unsigned Opc = Op.getOpcode();
20591 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
20592 Opc == X86ISD::SAHF)
20594 if (Op.getResNo() == 1 &&
20595 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
20596 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
20597 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
20603 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
20604 if (V.getOpcode() != ISD::TRUNCATE)
20607 SDValue VOp0 = V.getOperand(0);
20608 unsigned InBits = VOp0.getValueSizeInBits();
20609 unsigned Bits = V.getValueSizeInBits();
20610 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
20613 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
20614 bool AddTest = true;
20615 SDValue Cond = Op.getOperand(0);
20616 SDValue Op1 = Op.getOperand(1);
20617 SDValue Op2 = Op.getOperand(2);
20619 MVT VT = Op1.getSimpleValueType();
20622 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
20623 // are available or VBLENDV if AVX is available.
20624 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
20625 if (Cond.getOpcode() == ISD::SETCC &&
20626 ((Subtarget.hasSSE2() && VT == MVT::f64) ||
20627 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
20628 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
20629 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
20630 unsigned SSECC = translateX86FSETCC(
20631 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
20633 if (Subtarget.hasAVX512()) {
20634 SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
20635 CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
20636 assert(!VT.isVector() && "Not a scalar type?");
20637 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
20640 if (SSECC < 8 || Subtarget.hasAVX()) {
20641 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
20642 DAG.getConstant(SSECC, DL, MVT::i8));
20644 // If we have AVX, we can use a variable vector select (VBLENDV) instead
20645 // of 3 logic instructions for size savings and potentially speed.
20646 // Unfortunately, there is no scalar form of VBLENDV.
20648 // If either operand is a +0.0 constant, don't try this. We can expect to
20649 // optimize away at least one of the logic instructions later in that
20650 // case, so that sequence would be faster than a variable blend.
20652 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
20653 // uses XMM0 as the selection register. That may need just as many
20654       // instructions as the AND/ANDN/OR sequence due to register moves, so don't bother.
20656 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
20657 !isNullFPConstant(Op2)) {
20658 // Convert to vectors, do a VSELECT, and convert back to scalar.
20659 // All of the conversions should be optimized away.
20660 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
20661 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
20662 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
20663 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
20665 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
20666 VCmp = DAG.getBitcast(VCmpVT, VCmp);
20668 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
20670 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
20671 VSel, DAG.getIntPtrConstant(0, DL));
20673 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
20674 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
20675 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
20679 // AVX512 fallback is to lower selects of scalar floats to masked moves.
20680 if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
20681 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
20682 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
20685 // For v64i1 without 64-bit support we need to split and rejoin.
20686 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
20687 assert(Subtarget.hasBWI() && "Expected BWI to be legal");
20688 SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
20689 SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
20690 SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
20691 SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
20692 SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
20693 SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
20694 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20697 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
20699 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
20700 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
20701 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
20702 Op1Scalar = Op1.getOperand(0);
20704 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
20705 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
20706 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
20707 Op2Scalar = Op2.getOperand(0);
20708 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
20709 SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
20710 Op1Scalar, Op2Scalar);
20711 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
20712 return DAG.getBitcast(VT, newSelect);
20713 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
20714 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
20715 DAG.getIntPtrConstant(0, DL));
20719 if (Cond.getOpcode() == ISD::SETCC) {
20720 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
20722 // If the condition was updated, it's possible that the operands of the
20723 // select were also updated (for example, EmitTest has a RAUW). Refresh
20724 // the local references to the select operands in case they got stale.
20725 Op1 = Op.getOperand(1);
20726 Op2 = Op.getOperand(2);
20730 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
20731 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
20732 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
20733 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
20734 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
20735 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
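  // Sketch of the first transform (illustrative): CMP x, 1 sets CF exactly
  // when x == 0, and SBB 0, 0 then produces 0 - 0 - CF, i.e. all-ones when
  // x == 0 and zero otherwise; OR-ing that mask with y yields the selected
  // value.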
20736 if (Cond.getOpcode() == X86ISD::SETCC &&
20737 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
20738 isNullConstant(Cond.getOperand(1).getOperand(1))) {
20739 SDValue Cmp = Cond.getOperand(1);
20740 unsigned CondCode =
20741 cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
20743 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
20744 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
20745 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
20746 SDValue CmpOp0 = Cmp.getOperand(0);
20748 // Apply further optimizations for special cases
20749 // (select (x != 0), -1, 0) -> neg & sbb
20750 // (select (x == 0), 0, -1) -> neg & sbb
20751 if (isNullConstant(Y) &&
20752 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
20753 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
20754 SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
20755 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20756 Zero = DAG.getConstant(0, DL, Op.getValueType());
20757 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
20760 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
20761 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
20762 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
20764 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20765 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
20766 SDValue Res = // Res = 0 or -1.
20767 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
20769 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
20770 Res = DAG.getNOT(DL, Res, Res.getValueType());
20772 if (!isNullConstant(Op2))
20773 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
20775 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
20776 Cmp.getOperand(0).getOpcode() == ISD::AND &&
20777 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
20778 SDValue CmpOp0 = Cmp.getOperand(0);
20779 SDValue Src1, Src2;
20780       // true if Op2 is an XOR or OR operator and one of its operands matches Op1:
20782 // ( a , a op b) || ( b , a op b)
20783 auto isOrXorPattern = [&]() {
20784 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
20785 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
20787 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
20794 if (isOrXorPattern()) {
20796 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
20797         // We need a mask of all zeros or all ones with the same size as the other operands.
20799 if (CmpSz > VT.getSizeInBits())
20800 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
20801 else if (CmpSz < VT.getSizeInBits())
20802 Neg = DAG.getNode(ISD::AND, DL, VT,
20803 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
20804 DAG.getConstant(1, DL, VT));
20807 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
20808 Neg); // -(and (x, 0x1))
20809 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
20810 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
20815 // Look past (and (setcc_carry (cmp ...)), 1).
20816 if (Cond.getOpcode() == ISD::AND &&
20817 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
20818 isOneConstant(Cond.getOperand(1)))
20819 Cond = Cond.getOperand(0);
20821 // If condition flag is set by a X86ISD::CMP, then use it as the condition
20822 // setting operand in place of the X86ISD::SETCC.
20823 unsigned CondOpcode = Cond.getOpcode();
20824 if (CondOpcode == X86ISD::SETCC ||
20825 CondOpcode == X86ISD::SETCC_CARRY) {
20826 CC = Cond.getOperand(0);
20828 SDValue Cmp = Cond.getOperand(1);
20829 MVT VT = Op.getSimpleValueType();
20831 bool IllegalFPCMov = false;
20832 if (VT.isFloatingPoint() && !VT.isVector() &&
20833 !isScalarFPTypeInSSEReg(VT)) // FPStack?
20834 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
20836 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
20837 Cmp.getOpcode() == X86ISD::BT) { // FIXME
20841 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
20842 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
20843 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
20845 X86::CondCode X86Cond;
20846 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
20848 CC = DAG.getConstant(X86Cond, DL, MVT::i8);
20853 // Look past the truncate if the high bits are known zero.
20854 if (isTruncWithZeroHighBitsInput(Cond, DAG))
20855 Cond = Cond.getOperand(0);
20857   // We know the result of AND is compared against zero. Try to match it to BT.
20859 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
20861 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
20870 CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
20871 Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
20872 X86::COND_NE, DL, DAG);
20875 // a < b ? -1 : 0 -> RES = ~setcc_carry
20876 // a < b ? 0 : -1 -> RES = setcc_carry
20877 // a >= b ? -1 : 0 -> RES = setcc_carry
20878 // a >= b ? 0 : -1 -> RES = ~setcc_carry
20879 if (Cond.getOpcode() == X86ISD::SUB) {
20880 Cond = ConvertCmpIfNecessary(Cond, DAG);
20881 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
20883 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
20884 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
20885 (isNullConstant(Op1) || isNullConstant(Op2))) {
20886 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
20887 DAG.getConstant(X86::COND_B, DL, MVT::i8),
20889 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
20890 return DAG.getNOT(DL, Res, Res.getValueType());
20895   // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
20896 // widen the cmov and push the truncate through. This avoids introducing a new
20897 // branch during isel and doesn't add any extensions.
20898 if (Op.getValueType() == MVT::i8 &&
20899 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
20900 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
20901 if (T1.getValueType() == T2.getValueType() &&
20902 // Blacklist CopyFromReg to avoid partial register stalls.
20903 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
20904 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
20906 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
20910 // Or finally, promote i8 cmovs if we have CMOV,
20911 // or i16 cmovs if it won't prevent folding a load.
20912 // FIXME: we should not limit promotion of i8 case to only when the CMOV is
20913 // legal, but EmitLoweredSelect() can not deal with these extensions
20914 // being inserted between two CMOV's. (in i16 case too TBN)
20915 // https://bugs.llvm.org/show_bug.cgi?id=40974
20916 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
20917 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
20918 !MayFoldLoad(Op2))) {
20919 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
20920 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
20921 SDValue Ops[] = { Op2, Op1, CC, Cond };
20922 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
20923 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
20926 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
20927 // condition is true.
20928 SDValue Ops[] = { Op2, Op1, CC, Cond };
20929 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
20932 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
20933 const X86Subtarget &Subtarget,
20934 SelectionDAG &DAG) {
20935 MVT VT = Op->getSimpleValueType(0);
20936 SDValue In = Op->getOperand(0);
20937 MVT InVT = In.getSimpleValueType();
20938 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20939 MVT VTElt = VT.getVectorElementType();
20942 unsigned NumElts = VT.getVectorNumElements();
20944 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
20946 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
20947 // If v16i32 is to be avoided, we'll need to split and concatenate.
20948 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20949 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
20951 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20954 // Widen to 512-bits if VLX is not supported.
20955 MVT WideVT = ExtVT;
20956 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20957 NumElts *= 512 / ExtVT.getSizeInBits();
20958 InVT = MVT::getVectorVT(MVT::i1, NumElts);
20959 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
20960 In, DAG.getIntPtrConstant(0, dl));
20961 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
20965 MVT WideEltVT = WideVT.getVectorElementType();
20966 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
20967 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
20968 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
20970 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
20971 SDValue Zero = DAG.getConstant(0, dl, WideVT);
20972 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
20975 // Truncate if we had to extend i16/i8 above.
20977 WideVT = MVT::getVectorVT(VTElt, NumElts);
20978 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
20981 // Extract back to 128/256-bit if we widened.
20983 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
20984 DAG.getIntPtrConstant(0, dl));
20989 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20990 SelectionDAG &DAG) {
20991 SDValue In = Op->getOperand(0);
20992 MVT InVT = In.getSimpleValueType();
20994 if (InVT.getVectorElementType() == MVT::i1)
20995 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
20997 assert(Subtarget.hasAVX() && "Expected AVX support");
20998 return LowerAVXExtend(Op, DAG, Subtarget);
21001 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
21002 // For sign extend this needs to handle all vector sizes and SSE4.1 and
21003 // non-SSE4.1 targets. For zero extend this should only handle inputs of
21004 // MVT::v64i8 when BWI is not supported, but AVX512 is.
21005 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
21006 const X86Subtarget &Subtarget,
21007 SelectionDAG &DAG) {
21008 SDValue In = Op->getOperand(0);
21009 MVT VT = Op->getSimpleValueType(0);
21010 MVT InVT = In.getSimpleValueType();
21012 MVT SVT = VT.getVectorElementType();
21013 MVT InSVT = InVT.getVectorElementType();
21014 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
21016 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
21018 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
21020 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
21021 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
21022 !(VT.is512BitVector() && Subtarget.hasAVX512()))
21026 unsigned Opc = Op.getOpcode();
21027 unsigned NumElts = VT.getVectorNumElements();
21029 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
21030 // For 512-bit vectors, we need 128-bits or 256-bits.
21031 if (InVT.getSizeInBits() > 128) {
21032 // Input needs to be at least the same number of elements as output, and
21033 // at least 128-bits.
21034 int InSize = InSVT.getSizeInBits() * NumElts;
21035 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
21036 InVT = In.getSimpleValueType();
21039 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
21040   // so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
21041 // need to be handled here for 256/512-bit results.
21042 if (Subtarget.hasInt256()) {
21043 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
21045 if (InVT.getVectorNumElements() != NumElts)
21046 return DAG.getNode(Op.getOpcode(), dl, VT, In);
21048 // FIXME: Apparently we create inreg operations that could be regular
21051 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
21052 : ISD::ZERO_EXTEND;
21053 return DAG.getNode(ExtOpc, dl, VT, In);
21056 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
21057 if (Subtarget.hasAVX()) {
21058 assert(VT.is256BitVector() && "256-bit vector expected");
21059 int HalfNumElts = NumElts / 2;
21060 MVT HalfVT = MVT::getVectorVT(SVT, HalfNumElts);
21062 unsigned NumSrcElts = InVT.getVectorNumElements();
21063 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
21064 for (int i = 0; i != HalfNumElts; ++i)
21065 HiMask[i] = HalfNumElts + i;
21067 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
21068 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
21069 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
21070 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
21073 // We should only get here for sign extend.
21074 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
21075 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
21077 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
21079 SDValue SignExt = Curr;
21081 // As SRAI is only available on i16/i32 types, we expand only up to i32
21082 // and handle i64 separately.
21083 if (InVT != MVT::v4i32) {
21084 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
21086 unsigned DestWidth = DestVT.getScalarSizeInBits();
21087 unsigned Scale = DestWidth / InSVT.getSizeInBits();
21089 unsigned InNumElts = InVT.getVectorNumElements();
21090 unsigned DestElts = DestVT.getVectorNumElements();
21092 // Build a shuffle mask that takes each input element and places it in the
21093 // MSBs of the new element size.
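    // For example (illustrative): extending v16i8 to v4i32 uses Scale == 4,
    // so input byte i is placed at byte position i*4+3 (the MSB of dword i);
    // the arithmetic shift right by 32 - 8 = 24 below then completes the
    // sign extension.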
21094 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
21095 for (unsigned i = 0; i != DestElts; ++i)
21096 Mask[i * Scale + (Scale - 1)] = i;
21098 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
21099 Curr = DAG.getBitcast(DestVT, Curr);
21101 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
21102 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
21103 DAG.getConstant(SignExtShift, dl, MVT::i8));
21106 if (VT == MVT::v2i64) {
21107 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
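    // Synthesize the high dwords from the sign of the low dwords: (0 > Curr)
    // yields an all-ones dword exactly in the lanes whose sign bit is set,
    // and the interleaving shuffle below pairs each low dword with its sign
    // word to form the sign-extended i64 lanes.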
21108 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
21109 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
21110 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
21111 SignExt = DAG.getBitcast(VT, SignExt);
21117 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
21118 SelectionDAG &DAG) {
21119 MVT VT = Op->getSimpleValueType(0);
21120 SDValue In = Op->getOperand(0);
21121 MVT InVT = In.getSimpleValueType();
21124 if (InVT.getVectorElementType() == MVT::i1)
21125 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
21127 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
21128   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
21129 "Expected same number of elements");
21130 assert((VT.getVectorElementType() == MVT::i16 ||
21131 VT.getVectorElementType() == MVT::i32 ||
21132 VT.getVectorElementType() == MVT::i64) &&
21133 "Unexpected element type");
21134 assert((InVT.getVectorElementType() == MVT::i8 ||
21135 InVT.getVectorElementType() == MVT::i16 ||
21136 InVT.getVectorElementType() == MVT::i32) &&
21137 "Unexpected element type");
21139 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
21140 if (InVT == MVT::v8i8) {
21141 if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
21144 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
21145 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
21146 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
21149 if (Subtarget.hasInt256())
21152   // Optimize vectors in AVX mode:
21153   //   sign extend v8i16 to v8i32 and v4i32 to v4i64.
21156   // Divide the input vector into two parts
21157   // (for v4i32 the high shuffle mask will be {2, 3, -1, -1}),
21158   // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
21159   // then concat the vectors back to the original VT.
21161 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
21162 VT.getVectorNumElements() / 2);
21164 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
21166 unsigned NumElems = InVT.getVectorNumElements();
21167 SmallVector<int,8> ShufMask(NumElems, -1);
21168 for (unsigned i = 0; i != NumElems/2; ++i)
21169 ShufMask[i] = i + NumElems/2;
21171 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
21172 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
21174 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
21177 /// Change a vector store into a pair of half-size vector stores.
21178 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
21179 SDValue StoredVal = Store->getValue();
21180 assert((StoredVal.getValueType().is256BitVector() ||
21181 StoredVal.getValueType().is512BitVector()) &&
21182 "Expecting 256/512-bit op");
21184 // Splitting volatile memory ops is not allowed unless the operation was not
21185 // legal to begin with. Assume the input store is legal (this transform is
21186 // only used for targets with AVX). Note: It is possible that we have an
21187 // illegal type like v2i128, and so we could allow splitting a volatile store
21188 // in that case if that is important.
21189 if (Store->isVolatile())
21192 EVT StoreVT = StoredVal.getValueType();
21193 unsigned NumElems = StoreVT.getVectorNumElements();
21194 unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
21195 unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
21198 SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
21199 SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
21200 SDValue Ptr0 = Store->getBasePtr();
21201 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
21202 unsigned Alignment = Store->getAlignment();
21204 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
21205 Alignment, Store->getMemOperand()->getFlags());
21206 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
21207 Store->getPointerInfo().getWithOffset(HalfAlign),
21208 MinAlign(Alignment, HalfAlign),
21209 Store->getMemOperand()->getFlags());
21210 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
21213 /// Scalarize a vector store, bitcasting to StoreVT to determine the scalar type.
21215 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
21216 SelectionDAG &DAG) {
21217 SDValue StoredVal = Store->getValue();
21218 assert(StoreVT.is128BitVector() &&
21219 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
21220 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
21222 // Splitting volatile memory ops is not allowed unless the operation was not
21223 // legal to begin with. We are assuming the input op is legal (this transform
21224 // is only used for targets with AVX).
21225 if (Store->isVolatile())
21228 MVT StoreSVT = StoreVT.getScalarType();
21229 unsigned NumElems = StoreVT.getVectorNumElements();
21230 unsigned ScalarSize = StoreSVT.getStoreSize();
21231 unsigned Alignment = Store->getAlignment();
21234 SmallVector<SDValue, 4> Stores;
21235 for (unsigned i = 0; i != NumElems; ++i) {
21236 unsigned Offset = i * ScalarSize;
21237 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
21238 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
21239 DAG.getIntPtrConstant(i, DL));
21240 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
21241 Store->getPointerInfo().getWithOffset(Offset),
21242 MinAlign(Alignment, Offset),
21243 Store->getMemOperand()->getFlags());
21244 Stores.push_back(Ch);
21246 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
21249 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
21250 SelectionDAG &DAG) {
21251 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
21253 SDValue StoredVal = St->getValue();
21255 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
21256 if (StoredVal.getValueType().isVector() &&
21257 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
21258 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
21260 assert(!St->isTruncatingStore() && "Expected non-truncating store");
21261 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21262 "Expected AVX512F without AVX512DQI");
21264 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
21265 DAG.getUNDEF(MVT::v16i1), StoredVal,
21266 DAG.getIntPtrConstant(0, dl));
21267 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
21268 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
21270 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21271 St->getPointerInfo(), St->getAlignment(),
21272 St->getMemOperand()->getFlags());
21275 if (St->isTruncatingStore())
21278 // If this is a 256-bit store of concatenated ops, we are better off splitting
21279 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
21280 // and each half can execute independently. Some cores would split the op into
21281 // halves anyway, so the concat (vinsertf128) is purely an extra op.
21282 MVT StoreVT = StoredVal.getSimpleValueType();
21283 if (StoreVT.is256BitVector()) {
21284 SmallVector<SDValue, 4> CatOps;
21285 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
21286 return splitVectorStore(St, DAG);
21290 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
21292 if (DAG.getTargetLoweringInfo().getTypeAction(*DAG.getContext(), StoreVT) !=
21293 TargetLowering::TypeWidenVector)
21296 MVT WideVT = MVT::getVectorVT(StoreVT.getVectorElementType(),
21297 StoreVT.getVectorNumElements() * 2);
21298 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
21299 DAG.getUNDEF(StoreVT));
21301 if (Subtarget.hasSSE2()) {
21302 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
21304 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
21305 MVT CastVT = MVT::getVectorVT(StVT, 2);
21306 StoredVal = DAG.getBitcast(CastVT, StoredVal);
21307 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
21308 DAG.getIntPtrConstant(0, dl));
21310 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21311 St->getPointerInfo(), St->getAlignment(),
21312 St->getMemOperand()->getFlags());
21314 assert(Subtarget.hasSSE1() && "Expected SSE");
21315 SDVTList Tys = DAG.getVTList(MVT::Other);
21316 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
21317 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
21318 St->getMemOperand());
21321 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
21322 // may emit an illegal shuffle but the expansion is still better than scalar
21323 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
21324 // we'll emit a shuffle and an arithmetic shift.
21325 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
21326 // TODO: It is possible to support ZExt by zeroing the undef values during
21327 // the shuffle phase or after the shuffle.
21328 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
21329 SelectionDAG &DAG) {
21330 MVT RegVT = Op.getSimpleValueType();
21331 assert(RegVT.isVector() && "We only custom lower vector loads.");
21332 assert(RegVT.isInteger() &&
21333 "We only custom lower integer vector loads.");
21335 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
21337 EVT MemVT = Ld->getMemoryVT();
21339 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
21340 if (RegVT.getVectorElementType() == MVT::i1) {
21341 assert(EVT(RegVT) == MemVT && "Expected non-extending load");
21342 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
21343 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21344 "Expected AVX512F without AVX512DQI");
21346 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
21347 Ld->getPointerInfo(), Ld->getAlignment(),
21348 Ld->getMemOperand()->getFlags());
21350 // Replace chain users with the new chain.
21351 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
21353 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
21354 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
21355 DAG.getBitcast(MVT::v16i1, Val),
21356 DAG.getIntPtrConstant(0, dl));
21357 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
21360 // Nothing useful we can do without SSE2 shuffles.
21361 assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
21363 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21364 unsigned RegSz = RegVT.getSizeInBits();
21366 ISD::LoadExtType Ext = Ld->getExtensionType();
21368 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
21369 && "Only anyext and sext are currently implemented.");
21370 assert(MemVT != RegVT && "Cannot extend to the same type");
21371 assert(MemVT.isVector() && "Must load a vector from memory");
21373 unsigned NumElems = RegVT.getVectorNumElements();
21374 unsigned MemSz = MemVT.getSizeInBits();
21375 assert(RegSz > MemSz && "Register size must be greater than the mem size");
21377 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
21378 // The only way in which we have a legal 256-bit vector result but not the
21379 // integer 256-bit operations needed to directly lower a sextload is if we
21380 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
21381 // a 128-bit vector and a normal sign_extend to 256-bits that should get
21382 // correctly legalized. We do this late to allow the canonical form of
21383 // sextload to persist throughout the rest of the DAG combiner -- it wants
21384 // to fold together any extensions it can, and so will fuse a sign_extend
21385 // of an sextload into a sextload targeting a wider value.
21387 if (MemSz == 128) {
21388 // Just switch this to a normal load.
21389 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
21390 "it must be a legal 128-bit vector "
21392 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
21393 Ld->getPointerInfo(), Ld->getAlignment(),
21394 Ld->getMemOperand()->getFlags());
21396 assert(MemSz < 128 &&
21397 "Can't extend a type wider than 128 bits to a 256 bit vector!");
21398 // Do an sext load to a 128-bit vector type. We want to use the same
21399 // number of elements, but elements half as wide. This will end up being
21400 // recursively lowered by this routine, but will succeed as we definitely
21401 // have all the necessary features if we're using AVX1.
21403 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
21404 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
21406 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
21407 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
21408 Ld->getMemOperand()->getFlags());
21411 // Replace chain users with the new chain.
21412 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
21414 // Finally, do a normal sign-extend to the desired register.
21415 SDValue SExt = DAG.getSExtOrTrunc(Load, dl, RegVT);
21416 return DAG.getMergeValues({SExt, Load.getValue(1)}, dl);
21419 // All sizes must be a power of two.
21420 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
21421 "Non-power-of-two elements are not custom lowered!");
21423 // Attempt to load the original value using scalar loads.
21424 // Find the largest scalar type that divides the total loaded size.
21425 MVT SclrLoadTy = MVT::i8;
21426 for (MVT Tp : MVT::integer_valuetypes()) {
21427 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
21432 // On 32-bit systems we can't store 64-bit integers, so try bitcasting to f64.
21433 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
21435 SclrLoadTy = MVT::f64;
21437 // Calculate the number of scalar loads that we need to perform
21438 // in order to load our vector from memory.
21439 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
21441 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
21442 "Can only lower sext loads with a single scalar load!");
21444 unsigned loadRegSize = RegSz;
21445 if (Ext == ISD::SEXTLOAD && RegSz >= 256)
21448 // If we don't have BWI we won't be able to create the shuffle needed for v8i64->v8i8.
21450 if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
21451 MemVT == MVT::v8i8)
21454 // Represent our vector as a sequence of elements which are the
21455 // largest scalar that we can load.
21456 EVT LoadUnitVecVT = EVT::getVectorVT(
21457 *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
21459 // Represent the data using the same element type that is stored in
21460 // memory. In practice, we "widen" MemVT.
21462 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
21463 loadRegSize / MemVT.getScalarSizeInBits());
21465 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
21466 "Invalid vector type");
21468 // We can't shuffle using an illegal type.
21469 assert(TLI.isTypeLegal(WideVecVT) &&
21470 "We only lower types that form legal widened vector types");
21472 SmallVector<SDValue, 8> Chains;
21473 SDValue Ptr = Ld->getBasePtr();
21474 unsigned OffsetInc = SclrLoadTy.getSizeInBits() / 8;
21475 SDValue Increment = DAG.getConstant(OffsetInc, dl,
21476 TLI.getPointerTy(DAG.getDataLayout()));
21477 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
21479 unsigned Offset = 0;
21480 for (unsigned i = 0; i < NumLoads; ++i) {
21481 unsigned NewAlign = MinAlign(Ld->getAlignment(), Offset);
21483 // Perform a single load.
21484 SDValue ScalarLoad =
21485 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr,
21486 Ld->getPointerInfo().getWithOffset(Offset),
21487 NewAlign, Ld->getMemOperand()->getFlags());
21488 Chains.push_back(ScalarLoad.getValue(1));
21489 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
21490 // another round of DAGCombining.
21492 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
21494 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
21495 ScalarLoad, DAG.getIntPtrConstant(i, dl));
21497 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
21498 Offset += OffsetInc;
21501 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
21503 // Bitcast the loaded value to a vector of the original element type, in
21504 // the size of the target vector type.
21505 SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
21506 unsigned SizeRatio = RegSz / MemSz;
21508 if (Ext == ISD::SEXTLOAD) {
21509 SDValue Sext = getExtendInVec(ISD::SIGN_EXTEND, dl, RegVT, SlicedVec, DAG);
21510 return DAG.getMergeValues({Sext, TF}, dl);
21513 if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
21514 MemVT == MVT::v8i8) {
21515 SDValue Sext = getExtendInVec(ISD::ZERO_EXTEND, dl, RegVT, SlicedVec, DAG);
21516 return DAG.getMergeValues({Sext, TF}, dl);
21519 // Redistribute the loaded elements into the different locations.
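// Worked example (added): for an anyext load of v4i8 into v4i32 on SSE2,
// SclrLoadTy is i32 and a single i32 load feeds SCALAR_TO_VECTOR; SlicedVec
// is that value bitcast to v16i8, and with SizeRatio == 4 the shuffle below
// moves source bytes 0..3 to byte lanes 0, 4, 8 and 12, leaving the remaining
// lanes undef before the final bitcast back to RegVT.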
21520 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
21521 for (unsigned i = 0; i != NumElems; ++i)
21522 ShuffleVec[i * SizeRatio] = i;
21524 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
21525 DAG.getUNDEF(WideVecVT), ShuffleVec);
21527 // Bitcast to the requested type.
21528 Shuff = DAG.getBitcast(RegVT, Shuff);
21529 return DAG.getMergeValues({Shuff, TF}, dl);
21532 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
21533 /// each of which has no other use apart from the AND / OR.
21534 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
21535 Opc = Op.getOpcode();
21536 if (Opc != ISD::OR && Opc != ISD::AND)
21538 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21539 Op.getOperand(0).hasOneUse() &&
21540 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
21541 Op.getOperand(1).hasOneUse());
21544 /// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
21545 /// SETCC node has a single use.
21546 static bool isXor1OfSetCC(SDValue Op) {
21547 if (Op.getOpcode() != ISD::XOR)
21549 if (isOneConstant(Op.getOperand(1)))
21550 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21551 Op.getOperand(0).hasOneUse();
21555 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
21556 bool addTest = true;
21557 SDValue Chain = Op.getOperand(0);
21558 SDValue Cond = Op.getOperand(1);
21559 SDValue Dest = Op.getOperand(2);
21562 bool Inverted = false;
21564 if (Cond.getOpcode() == ISD::SETCC) {
21565 // Check for setcc([su]{add,sub,mul}o == 0).
21566 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
21567 isNullConstant(Cond.getOperand(1)) &&
21568 Cond.getOperand(0).getResNo() == 1 &&
21569 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
21570 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
21571 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
21572 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
21573 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
21574 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
21576 Cond = Cond.getOperand(0);
21578 if (SDValue NewCond = LowerSETCC(Cond, DAG))
21583 // FIXME: LowerXALUO doesn't handle these!!
21584 else if (Cond.getOpcode() == X86ISD::ADD ||
21585 Cond.getOpcode() == X86ISD::SUB ||
21586 Cond.getOpcode() == X86ISD::SMUL ||
21587 Cond.getOpcode() == X86ISD::UMUL)
21588 Cond = LowerXALUO(Cond, DAG);
21591 // Look past (and (setcc_carry (cmp ...)), 1).
21592 if (Cond.getOpcode() == ISD::AND &&
21593 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
21594 isOneConstant(Cond.getOperand(1)))
21595 Cond = Cond.getOperand(0);
21597 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
21598 // setting operand in place of the X86ISD::SETCC.
21599 unsigned CondOpcode = Cond.getOpcode();
21600 if (CondOpcode == X86ISD::SETCC ||
21601 CondOpcode == X86ISD::SETCC_CARRY) {
21602 CC = Cond.getOperand(0);
21604 SDValue Cmp = Cond.getOperand(1);
21605 unsigned Opc = Cmp.getOpcode();
21606 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
21607 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
21611 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
21615 // These can only come from an arithmetic instruction with overflow,
21616 // e.g. SADDO, UADDO.
21617 Cond = Cond.getOperand(1);
21623 CondOpcode = Cond.getOpcode();
21624 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
21625 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
21626 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
21628 X86::CondCode X86Cond;
21629 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
21632 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
21634 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
21638 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
21639 SDValue Cmp = Cond.getOperand(0).getOperand(1);
21640 if (CondOpc == ISD::OR) {
21641 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
21642 // two branches instead of an explicit OR instruction with a separate test.
21644 if (Cmp == Cond.getOperand(1).getOperand(1) &&
21645 isX86LogicalCmp(Cmp)) {
21646 CC = Cond.getOperand(0).getOperand(0);
21647 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21648 Chain, Dest, CC, Cmp);
21649 CC = Cond.getOperand(1).getOperand(0);
21653 } else { // ISD::AND
21654 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
21655 // two branches instead of an explicit AND instruction with a
21656 // separate test. However, we only do this if this block doesn't
21657 // have a fall-through edge, because this requires an explicit
21658 // jmp when the condition is false.
21659 if (Cmp == Cond.getOperand(1).getOperand(1) &&
21660 isX86LogicalCmp(Cmp) &&
21661 Op.getNode()->hasOneUse()) {
21662 X86::CondCode CCode =
21663 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
21664 CCode = X86::GetOppositeBranchCondition(CCode);
21665 CC = DAG.getConstant(CCode, dl, MVT::i8);
21666 SDNode *User = *Op.getNode()->use_begin();
21667 // Look for an unconditional branch following this conditional branch.
21668 // We need it because we have to reverse the successors in order
21669 // to implement FCMP_OEQ.
21670 if (User->getOpcode() == ISD::BR) {
21671 SDValue FalseBB = User->getOperand(1);
21673 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
21674 assert(NewBR == User);
21678 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21679 Chain, Dest, CC, Cmp);
21680 X86::CondCode CCode =
21681 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
21682 CCode = X86::GetOppositeBranchCondition(CCode);
21683 CC = DAG.getConstant(CCode, dl, MVT::i8);
21689 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
21690 // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
21691 // It should be transformed by the DAG combiner except when the condition
21692 // is set by an arithmetic-with-overflow node.
21693 X86::CondCode CCode =
21694 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
21695 CCode = X86::GetOppositeBranchCondition(CCode);
21696 CC = DAG.getConstant(CCode, dl, MVT::i8);
21697 Cond = Cond.getOperand(0).getOperand(1);
21699 } else if (Cond.getOpcode() == ISD::SETCC &&
21700 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
21701 // For FCMP_OEQ, we can emit
21702 // two branches instead of an explicit AND instruction with a
21703 // separate test. However, we only do this if this block doesn't
21704 // have a fall-through edge, because this requires an explicit
21705 // jmp when the condition is false.
21706 if (Op.getNode()->hasOneUse()) {
21707 SDNode *User = *Op.getNode()->use_begin();
21708 // Look for an unconditional branch following this conditional branch.
21709 // We need it because we have to reverse the successors in order
21710 // to implement FCMP_OEQ.
21711 if (User->getOpcode() == ISD::BR) {
21712 SDValue FalseBB = User->getOperand(1);
21714 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
21715 assert(NewBR == User);
21719 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
21720 Cond.getOperand(0), Cond.getOperand(1));
21721 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
21722 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
21723 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21724 Chain, Dest, CC, Cmp);
21725 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
21730 } else if (Cond.getOpcode() == ISD::SETCC &&
21731 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
21732 // For FCMP_UNE, we can emit
21733 // two branches instead of an explicit OR instruction with a separate test.
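// Added note: after the FP compare, 'une' holds exactly when ZF is clear or
// PF is set, so the lowering below branches on COND_NE and then on COND_P
// instead of materializing and testing an OR of the two conditions.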
21735 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
21736 Cond.getOperand(0), Cond.getOperand(1));
21737 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
21738 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
21739 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21740 Chain, Dest, CC, Cmp);
21741 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
21748 // Look past the truncate if the high bits are known zero.
21749 if (isTruncWithZeroHighBitsInput(Cond, DAG))
21750 Cond = Cond.getOperand(0);
21752 // We know the result of the AND is compared against zero. Try to match it to BT.
21754 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
21756 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
21765 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
21766 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
21767 Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
21770 Cond = ConvertCmpIfNecessary(Cond, DAG);
21771 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
21772 Chain, Dest, CC, Cond);
21775 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
21776 // Calls to _alloca are needed to probe the stack when allocating more than 4K
21777 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
21778 // that the guard pages used by the OS virtual memory manager are allocated in
21779 // the correct sequence.
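// Added illustration: allocating, say, 16K dynamically on such a target must
// touch one byte in each of the four 4K pages it spans, so the probe call
// emitted below lets the OS commit the guard pages one page at a time.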
21781 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
21782 SelectionDAG &DAG) const {
21783 MachineFunction &MF = DAG.getMachineFunction();
21784 bool SplitStack = MF.shouldSplitStack();
21785 bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
21786 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
21787 SplitStack || EmitStackProbe;
21791 SDNode *Node = Op.getNode();
21792 SDValue Chain = Op.getOperand(0);
21793 SDValue Size = Op.getOperand(1);
21794 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
21795 EVT VT = Node->getValueType(0);
21797 // Chain the dynamic stack allocation so that it doesn't modify the stack
21798 // pointer when other instructions are using the stack.
21799 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
21801 bool Is64Bit = Subtarget.is64Bit();
21802 MVT SPTy = getPointerTy(DAG.getDataLayout());
21806 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21807 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
21808 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
21809 " not tell us which reg is the stack pointer!");
21811 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
21812 Chain = SP.getValue(1);
21813 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
21814 unsigned StackAlign = TFI.getStackAlignment();
21815 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
21816 if (Align > StackAlign)
21817 Result = DAG.getNode(ISD::AND, dl, VT, Result,
21818 DAG.getConstant(-(uint64_t)Align, dl, VT));
21819 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
21820 } else if (SplitStack) {
21821 MachineRegisterInfo &MRI = MF.getRegInfo();
21824 // The 64-bit implementation of segmented stacks needs to clobber both r10
21825 // and r11. This makes it impossible to use it along with nested parameters.
21826 const Function &F = MF.getFunction();
21827 for (const auto &A : F.args()) {
21828 if (A.hasNestAttr())
21829 report_fatal_error("Cannot use segmented stacks with functions that "
21830 "have nested arguments.");
21834 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
21835 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
21836 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
21837 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
21838 DAG.getRegister(Vreg, SPTy));
21840 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
21841 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
21842 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
21844 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
21845 unsigned SPReg = RegInfo->getStackRegister();
21846 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
21847 Chain = SP.getValue(1);
21850 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
21851 DAG.getConstant(-(uint64_t)Align, dl, VT));
21852 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
21858 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
21859 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
21861 SDValue Ops[2] = {Result, Chain};
21862 return DAG.getMergeValues(Ops, dl);
21865 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
21866 MachineFunction &MF = DAG.getMachineFunction();
21867 auto PtrVT = getPointerTy(MF.getDataLayout());
21868 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
21870 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
21873 if (!Subtarget.is64Bit() ||
21874 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
21875 // vastart just stores the address of the VarArgsFrameIndex slot into the
21876 // memory location argument.
21877 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
21878 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
21879 MachinePointerInfo(SV));
21883 // gp_offset (0 - 6 * 8)
21884 // fp_offset (48 - 48 + 8 * 16)
21885 // overflow_arg_area (points to parameters passed in memory).
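// For reference (added), the SysV x86-64 LP64 __va_list_tag element that the
// stores below populate is laid out roughly as:
//   struct __va_list_tag {
//     unsigned gp_offset;          // byte offset 0
//     unsigned fp_offset;          // byte offset 4
//     void    *overflow_arg_area;  // byte offset 8
//     void    *reg_save_area;      // byte offset 16
//   };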
21887 SmallVector<SDValue, 8> MemOps;
21888 SDValue FIN = Op.getOperand(1);
21890 SDValue Store = DAG.getStore(
21891 Op.getOperand(0), DL,
21892 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
21893 MachinePointerInfo(SV));
21894 MemOps.push_back(Store);
21897 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
21898 Store = DAG.getStore(
21899 Op.getOperand(0), DL,
21900 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
21901 MachinePointerInfo(SV, 4));
21902 MemOps.push_back(Store);
21904 // Store ptr to overflow_arg_area
21905 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
21906 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
21908 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
21909 MemOps.push_back(Store);
21911 // Store ptr to reg_save_area.
21912 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
21913 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
21914 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
21915 Store = DAG.getStore(
21916 Op.getOperand(0), DL, RSFIN, FIN,
21917 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
21918 MemOps.push_back(Store);
21919 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
21922 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
21923 assert(Subtarget.is64Bit() &&
21924 "LowerVAARG only handles 64-bit va_arg!");
21925 assert(Op.getNumOperands() == 4);
21927 MachineFunction &MF = DAG.getMachineFunction();
21928 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
21929 // The Win64 ABI uses char* instead of a structure.
21930 return DAG.expandVAArg(Op.getNode());
21932 SDValue Chain = Op.getOperand(0);
21933 SDValue SrcPtr = Op.getOperand(1);
21934 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
21935 unsigned Align = Op.getConstantOperandVal(3);
21938 EVT ArgVT = Op.getNode()->getValueType(0);
21939 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
21940 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
21943 // Decide which area this value should be read from.
21944 // TODO: Implement the AMD64 ABI in its entirety. This simple
21945 // selection mechanism works only for the basic types.
21946 if (ArgVT == MVT::f80) {
21947 llvm_unreachable("va_arg for f80 not yet implemented");
21948 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
21949 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
21950 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
21951 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
21953 llvm_unreachable("Unhandled argument type in LowerVAARG");
21956 if (ArgMode == 2) {
21957 // Sanity Check: Make sure using fp_offset makes sense.
21958 assert(!Subtarget.useSoftFloat() &&
21959 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
21960 Subtarget.hasSSE1());
21963 // Insert VAARG_64 node into the DAG
21964 // VAARG_64 returns two values: Variable Argument Address, Chain
21965 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
21966 DAG.getConstant(ArgMode, dl, MVT::i8),
21967 DAG.getConstant(Align, dl, MVT::i32)};
21968 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
21969 SDValue VAARG = DAG.getMemIntrinsicNode(
21970 X86ISD::VAARG_64, dl,
21971 VTs, InstOps, MVT::i64,
21972 MachinePointerInfo(SV),
21974 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
21975 Chain = VAARG.getValue(1);
21977 // Load the next argument and return it
21978 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
21981 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
21982 SelectionDAG &DAG) {
21983 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
21984 // where a va_list is still an i8*.
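// Added note: on LP64 that struct is 24 bytes (4 + 4 + 8 + 8), which is why
// the lowering below turns va_copy into a fixed 24-byte memcpy.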
21985 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
21986 if (Subtarget.isCallingConvWin64(
21987 DAG.getMachineFunction().getFunction().getCallingConv()))
21988 // Probably a Win64 va_copy.
21989 return DAG.expandVACopy(Op.getNode());
21991 SDValue Chain = Op.getOperand(0);
21992 SDValue DstPtr = Op.getOperand(1);
21993 SDValue SrcPtr = Op.getOperand(2);
21994 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
21995 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
21998 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
21999 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
22001 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
22004 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
22005 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
22009 case X86ISD::VSHLI:
22010 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
22013 case X86ISD::VSRLI:
22014 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
22017 case X86ISD::VSRAI:
22018 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
22020 llvm_unreachable("Unknown target vector shift node");
22023 /// Handle vector element shifts where the shift amount is a constant.
22024 /// Takes immediate version of shift as input.
22025 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
22026 SDValue SrcOp, uint64_t ShiftAmt,
22027 SelectionDAG &DAG) {
22028 MVT ElementType = VT.getVectorElementType();
22030 // Bitcast the source vector to the output type; this is mainly necessary for
22031 // vXi8/vXi64 shifts.
22032 if (VT != SrcOp.getSimpleValueType())
22033 SrcOp = DAG.getBitcast(VT, SrcOp);
22035 // Fold this packed shift into its first operand if ShiftAmt is 0.
22039 // Check for ShiftAmt >= element width
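// Added clarification: a logical shift by the element width or more yields
// zero, while an arithmetic right shift saturates at width-1 so each lane
// becomes a copy of its sign bit.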
22040 if (ShiftAmt >= ElementType.getSizeInBits()) {
22041 if (Opc == X86ISD::VSRAI)
22042 ShiftAmt = ElementType.getSizeInBits() - 1;
22044 return DAG.getConstant(0, dl, VT);
22047 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
22048 && "Unknown target vector shift-by-constant node");
22050 // Fold this packed vector shift into a build vector if SrcOp is a
22051 // vector of Constants or UNDEFs.
22052 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
22053 SmallVector<SDValue, 8> Elts;
22054 unsigned NumElts = SrcOp->getNumOperands();
22057 default: llvm_unreachable("Unknown opcode!");
22058 case X86ISD::VSHLI:
22059 for (unsigned i = 0; i != NumElts; ++i) {
22060 SDValue CurrentOp = SrcOp->getOperand(i);
22061 if (CurrentOp->isUndef()) {
22062 Elts.push_back(CurrentOp);
22065 auto *ND = cast<ConstantSDNode>(CurrentOp);
22066 const APInt &C = ND->getAPIntValue();
22067 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
22070 case X86ISD::VSRLI:
22071 for (unsigned i = 0; i != NumElts; ++i) {
22072 SDValue CurrentOp = SrcOp->getOperand(i);
22073 if (CurrentOp->isUndef()) {
22074 Elts.push_back(CurrentOp);
22077 auto *ND = cast<ConstantSDNode>(CurrentOp);
22078 const APInt &C = ND->getAPIntValue();
22079 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
22082 case X86ISD::VSRAI:
22083 for (unsigned i = 0; i != NumElts; ++i) {
22084 SDValue CurrentOp = SrcOp->getOperand(i);
22085 if (CurrentOp->isUndef()) {
22086 Elts.push_back(CurrentOp);
22089 auto *ND = cast<ConstantSDNode>(CurrentOp);
22090 const APInt &C = ND->getAPIntValue();
22091 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
22096 return DAG.getBuildVector(VT, dl, Elts);
22099 return DAG.getNode(Opc, dl, VT, SrcOp,
22100 DAG.getConstant(ShiftAmt, dl, MVT::i8));
22103 /// Handle vector element shifts where the shift amount may or may not be a
22104 /// constant. Takes immediate version of shift as input.
22105 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
22106 SDValue SrcOp, SDValue ShAmt,
22107 const X86Subtarget &Subtarget,
22108 SelectionDAG &DAG) {
22109 MVT SVT = ShAmt.getSimpleValueType();
22110 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
22112 // Catch shift-by-constant.
22113 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
22114 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
22115 CShAmt->getZExtValue(), DAG);
22117 // Change opcode to non-immediate version.
22118 Opc = getTargetVShiftUniformOpcode(Opc, true);
22120 // Need to build a vector containing shift amount.
22121 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
22122 // +====================+============+=======================================+
22123 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
22124 // +====================+============+=======================================+
22125 // | i64 | Yes, No | Use ShAmt as lowest elt |
22126 // | i32 | Yes | zero-extend in-reg |
22127 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
22128 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
22129 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
22130 // +====================+============+=======================================+
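// Added example: with an i32 shift count and no SSE4.1 we hit the last row
// and build v4i32 {ShAmt, 0, undef, undef}; the packed shift only reads the
// low 64 bits of the count vector, which then hold the zero-extended amount.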
22132 if (SVT == MVT::i64)
22133 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
22134 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
22135 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
22136 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
22137 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
22138 ShAmt = ShAmt.getOperand(0);
22139 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
22140 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
22141 if (Subtarget.hasSSE41())
22142 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22143 MVT::v2i64, ShAmt);
22145 SDValue ByteShift = DAG.getConstant(
22146 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
22147 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
22148 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22150 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22153 } else if (Subtarget.hasSSE41() &&
22154 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22155 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
22156 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22157 MVT::v2i64, ShAmt);
22159 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
22160 DAG.getUNDEF(SVT)};
22161 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
22164 // The return type has to be a 128-bit type with the same element
22165 // type as the input type.
22166 MVT EltVT = VT.getVectorElementType();
22167 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
22169 ShAmt = DAG.getBitcast(ShVT, ShAmt);
22170 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
22173 /// Return Mask with the necessary casting or extending
22174 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
22175 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
22176 const X86Subtarget &Subtarget, SelectionDAG &DAG,
22179 if (isAllOnesConstant(Mask))
22180 return DAG.getConstant(1, dl, MaskVT);
22181 if (X86::isZeroNode(Mask))
22182 return DAG.getConstant(0, dl, MaskVT);
22184 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
22186 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
22187 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
22188 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
22189 // In 32-bit mode a bitcast of i64 is illegal, so split it into two i32 halves.
22191 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22192 DAG.getConstant(0, dl, MVT::i32));
22193 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22194 DAG.getConstant(1, dl, MVT::i32));
22196 Lo = DAG.getBitcast(MVT::v32i1, Lo);
22197 Hi = DAG.getBitcast(MVT::v32i1, Hi);
22199 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
22201 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
22202 Mask.getSimpleValueType().getSizeInBits());
22203 // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements
22204 // are extracted by EXTRACT_SUBVECTOR.
22205 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
22206 DAG.getBitcast(BitcastVT, Mask),
22207 DAG.getIntPtrConstant(0, dl));
22211 /// Return (and \p Op, \p Mask) for compare instructions or
22212 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
22213 /// necessary casting or extending for \p Mask when lowering masking intrinsics
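/// Hypothetical example (added): for a masked vector add intrinsic, Op is the
/// unmasked add and the result becomes vselect(Mask, Op, PreservedSrc); an
/// all-ones mask short-circuits the select and Op is used as-is.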
22214 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
22215 SDValue PreservedSrc,
22216 const X86Subtarget &Subtarget,
22217 SelectionDAG &DAG) {
22218 MVT VT = Op.getSimpleValueType();
22219 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
22220 unsigned OpcodeSelect = ISD::VSELECT;
22223 if (isAllOnesConstant(Mask))
22226 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22228 if (PreservedSrc.isUndef())
22229 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22230 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
22233 /// Creates an SDNode for a predicated scalar operation.
22234 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
22235 /// The mask comes in as MVT::i8 and should be transformed
22236 /// to MVT::v1i1 while lowering masking intrinsics.
22237 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
22238 /// "X86select" instead of "vselect". We just can't create the "vselect" node
22239 /// for a scalar instruction.
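/// Added sketch: the effect is result[0] = Mask.bit(0) ? Op[0] : PreservedSrc[0],
/// with the incoming i8 mask narrowed to v1i1 first; FSETCCM/VFPCLASSS results
/// are instead AND-ed with the mask bit since they already produce a mask value.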
22240 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
22241 SDValue PreservedSrc,
22242 const X86Subtarget &Subtarget,
22243 SelectionDAG &DAG) {
22245 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
22246 if (MaskConst->getZExtValue() & 0x1)
22249 MVT VT = Op.getSimpleValueType();
22252 assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
22253 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
22254 DAG.getBitcast(MVT::v8i1, Mask),
22255 DAG.getIntPtrConstant(0, dl));
22256 if (Op.getOpcode() == X86ISD::FSETCCM ||
22257 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
22258 Op.getOpcode() == X86ISD::VFPCLASSS)
22259 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
22261 if (PreservedSrc.isUndef())
22262 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22263 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
22266 static int getSEHRegistrationNodeSize(const Function *Fn) {
22267 if (!Fn->hasPersonalityFn())
22268 report_fatal_error(
22269 "querying registration node size for function without personality");
22270 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
22271 // WinEHStatePass for the full struct definition.
22272 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
22273 case EHPersonality::MSVC_X86SEH: return 24;
22274 case EHPersonality::MSVC_CXX: return 16;
22277 report_fatal_error(
22278 "can only recover FP for 32-bit MSVC EH personality functions");
22281 /// When the MSVC runtime transfers control to us, either to an outlined
22282 /// function or when returning to a parent frame after catching an exception, we
22283 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
22284 /// Here's the math:
22285 /// RegNodeBase = EntryEBP - RegNodeSize
22286 /// ParentFP = RegNodeBase - ParentFrameOffset
22287 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
22288 /// subtracting the offset (negative on x86) takes us back to the parent FP.
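/// Worked example (added, with made-up numbers): for a 32-bit MSVC SEH
/// function RegNodeSize is 24 (see getSEHRegistrationNodeSize below), so with
/// EntryEBP = 0x1000 and a parent frame offset of -32 we get
/// RegNodeBase = 0x1000 - 24 = 0xFE8 and ParentFP = 0xFE8 - (-32) = 0x1008.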
22289 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
22290 SDValue EntryEBP) {
22291 MachineFunction &MF = DAG.getMachineFunction();
22294 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22295 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
22297 // It's possible that the parent function no longer has a personality function
22298 // if the exceptional code was optimized away, in which case we just return
22299 // the incoming EBP.
22300 if (!Fn->hasPersonalityFn())
22303 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
22304 // registration, or the .set_setframe offset.
22305 MCSymbol *OffsetSym =
22306 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
22307 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
22308 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
22309 SDValue ParentFrameOffset =
22310 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
22312 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
22313 // prologue to RBP in the parent function.
22314 const X86Subtarget &Subtarget =
22315 static_cast<const X86Subtarget &>(DAG.getSubtarget());
22316 if (Subtarget.is64Bit())
22317 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
22319 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
22320 // RegNodeBase = EntryEBP - RegNodeSize
22321 // ParentFP = RegNodeBase - ParentFrameOffset
22322 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
22323 DAG.getConstant(RegNodeSize, dl, PtrVT));
22324 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
22327 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
22328 SelectionDAG &DAG) const {
22329 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
22330 auto isRoundModeCurDirection = [](SDValue Rnd) {
22331 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
22332 return C->getZExtValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
22336 auto isRoundModeSAE = [](SDValue Rnd) {
22337 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
22338 return C->getZExtValue() == X86::STATIC_ROUNDING::NO_EXC;
22342 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
22343 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
22344 RC = C->getZExtValue();
22345 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
22346 // Clear the NO_EXC bit and check remaining bits.
22347 RC ^= X86::STATIC_ROUNDING::NO_EXC;
22348 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
22349 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
22350 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
22351 RC == X86::STATIC_ROUNDING::TO_ZERO;
22359 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22360 MVT VT = Op.getSimpleValueType();
22361 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
22363 switch(IntrData->Type) {
22364 case INTR_TYPE_1OP: {
22365 // We specify 2 possible opcodes for intrinsics with rounding modes.
22366 // First, we check if the intrinsic may have non-default rounding mode,
22367 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
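// Added note: the rounding operand carries an X86::STATIC_ROUNDING encoding,
// so isRoundModeSAEToX above only accepts NO_EXC combined with one of the
// four explicit rounding directions and passes the remaining bits as RC.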
22368 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22369 if (IntrWithRoundingModeOpcode != 0) {
22370 SDValue Rnd = Op.getOperand(2);
22372 if (isRoundModeSAEToX(Rnd, RC))
22373 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22375 DAG.getTargetConstant(RC, dl, MVT::i32));
22376 if (!isRoundModeCurDirection(Rnd))
22379 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
22381 case INTR_TYPE_1OP_SAE: {
22382 SDValue Sae = Op.getOperand(2);
22385 if (isRoundModeCurDirection(Sae))
22386 Opc = IntrData->Opc0;
22387 else if (isRoundModeSAE(Sae))
22388 Opc = IntrData->Opc1;
22392 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
22394 case INTR_TYPE_2OP: {
22395 SDValue Src2 = Op.getOperand(2);
22397 // We specify 2 possible opcodes for intrinsics with rounding modes.
22398 // First, we check if the intrinsic may have non-default rounding mode,
22399 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22400 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22401 if (IntrWithRoundingModeOpcode != 0) {
22402 SDValue Rnd = Op.getOperand(3);
22404 if (isRoundModeSAEToX(Rnd, RC))
22405 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22406 Op.getOperand(1), Src2,
22407 DAG.getTargetConstant(RC, dl, MVT::i32));
22408 if (!isRoundModeCurDirection(Rnd))
22412 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22413 Op.getOperand(1), Src2);
22415 case INTR_TYPE_2OP_SAE: {
22416 SDValue Sae = Op.getOperand(3);
22419 if (isRoundModeCurDirection(Sae))
22420 Opc = IntrData->Opc0;
22421 else if (isRoundModeSAE(Sae))
22422 Opc = IntrData->Opc1;
22426 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
22429 case INTR_TYPE_3OP:
22430 case INTR_TYPE_3OP_IMM8: {
22431 SDValue Src1 = Op.getOperand(1);
22432 SDValue Src2 = Op.getOperand(2);
22433 SDValue Src3 = Op.getOperand(3);
22435 if (IntrData->Type == INTR_TYPE_3OP_IMM8)
22436 Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
22438 // We specify 2 possible opcodes for intrinsics with rounding modes.
22439 // First, we check if the intrinsic may have non-default rounding mode,
22440 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22441 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22442 if (IntrWithRoundingModeOpcode != 0) {
22443 SDValue Rnd = Op.getOperand(4);
22445 if (isRoundModeSAEToX(Rnd, RC))
22446 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22448 DAG.getTargetConstant(RC, dl, MVT::i32));
22449 if (!isRoundModeCurDirection(Rnd))
22453 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22456 case INTR_TYPE_4OP:
22457 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
22458 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
22459 case INTR_TYPE_1OP_MASK: {
22460 SDValue Src = Op.getOperand(1);
22461 SDValue PassThru = Op.getOperand(2);
22462 SDValue Mask = Op.getOperand(3);
22463 // We add rounding mode to the Node when
22464 // - RC Opcode is specified and
22465 // - RC is not "current direction".
22466 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22467 if (IntrWithRoundingModeOpcode != 0) {
22468 SDValue Rnd = Op.getOperand(4);
22470 if (isRoundModeSAEToX(Rnd, RC))
22471 return getVectorMaskingNode(
22472 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22473 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
22474 Mask, PassThru, Subtarget, DAG);
22475 if (!isRoundModeCurDirection(Rnd))
22478 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
22479 Mask, PassThru, Subtarget, DAG);
22481 case INTR_TYPE_1OP_MASK_SAE: {
22482 SDValue Src = Op.getOperand(1);
22483 SDValue PassThru = Op.getOperand(2);
22484 SDValue Mask = Op.getOperand(3);
22485 SDValue Rnd = Op.getOperand(4);
22488 if (isRoundModeCurDirection(Rnd))
22489 Opc = IntrData->Opc0;
22490 else if (isRoundModeSAE(Rnd))
22491 Opc = IntrData->Opc1;
22495 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src),
22496 Mask, PassThru, Subtarget, DAG);
22498 case INTR_TYPE_SCALAR_MASK: {
22499 SDValue Src1 = Op.getOperand(1);
22500 SDValue Src2 = Op.getOperand(2);
22501 SDValue passThru = Op.getOperand(3);
22502 SDValue Mask = Op.getOperand(4);
22503 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22504 // There are 2 kinds of intrinsics in this group:
22505 // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
22506 // (2) With rounding mode and sae - 7 operands.
22507 bool HasRounding = IntrWithRoundingModeOpcode != 0;
22508 if (Op.getNumOperands() == (5U + HasRounding)) {
22510 SDValue Rnd = Op.getOperand(5);
22512 if (isRoundModeSAEToX(Rnd, RC))
22513 return getScalarMaskingNode(
22514 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
22515 DAG.getTargetConstant(RC, dl, MVT::i32)),
22516 Mask, passThru, Subtarget, DAG);
22517 if (!isRoundModeCurDirection(Rnd))
22520 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
22522 Mask, passThru, Subtarget, DAG);
22525 assert(Op.getNumOperands() == (6U + HasRounding) &&
22526 "Unexpected intrinsic form");
22527 SDValue RoundingMode = Op.getOperand(5);
22528 unsigned Opc = IntrData->Opc0;
22530 SDValue Sae = Op.getOperand(6);
22531 if (isRoundModeSAE(Sae))
22532 Opc = IntrWithRoundingModeOpcode;
22533 else if (!isRoundModeCurDirection(Sae))
22536 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
22537 Src2, RoundingMode),
22538 Mask, passThru, Subtarget, DAG);
22540 case INTR_TYPE_SCALAR_MASK_RND: {
22541 SDValue Src1 = Op.getOperand(1);
22542 SDValue Src2 = Op.getOperand(2);
22543 SDValue passThru = Op.getOperand(3);
22544 SDValue Mask = Op.getOperand(4);
22545 SDValue Rnd = Op.getOperand(5);
22549 if (isRoundModeCurDirection(Rnd))
22550 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22551 else if (isRoundModeSAEToX(Rnd, RC))
22552 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22553 DAG.getTargetConstant(RC, dl, MVT::i32));
22557 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
22559 case INTR_TYPE_SCALAR_MASK_SAE: {
22560 SDValue Src1 = Op.getOperand(1);
22561 SDValue Src2 = Op.getOperand(2);
22562 SDValue passThru = Op.getOperand(3);
22563 SDValue Mask = Op.getOperand(4);
22564 SDValue Sae = Op.getOperand(5);
22566 if (isRoundModeCurDirection(Sae))
22567 Opc = IntrData->Opc0;
22568 else if (isRoundModeSAE(Sae))
22569 Opc = IntrData->Opc1;
22573 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22574 Mask, passThru, Subtarget, DAG);
22576 case INTR_TYPE_2OP_MASK: {
22577 SDValue Src1 = Op.getOperand(1);
22578 SDValue Src2 = Op.getOperand(2);
22579 SDValue PassThru = Op.getOperand(3);
22580 SDValue Mask = Op.getOperand(4);
22582 if (IntrData->Opc1 != 0) {
22583 SDValue Rnd = Op.getOperand(5);
22585 if (isRoundModeSAEToX(Rnd, RC))
22586 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22587 DAG.getTargetConstant(RC, dl, MVT::i32));
22588 else if (!isRoundModeCurDirection(Rnd))
22592 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22593 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
22595 case INTR_TYPE_2OP_MASK_SAE: {
22596 SDValue Src1 = Op.getOperand(1);
22597 SDValue Src2 = Op.getOperand(2);
22598 SDValue PassThru = Op.getOperand(3);
22599 SDValue Mask = Op.getOperand(4);
22601 unsigned Opc = IntrData->Opc0;
22602 if (IntrData->Opc1 != 0) {
22603 SDValue Sae = Op.getOperand(5);
22604 if (isRoundModeSAE(Sae))
22605 Opc = IntrData->Opc1;
22606 else if (!isRoundModeCurDirection(Sae))
22610 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22611 Mask, PassThru, Subtarget, DAG);
22613 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
22614 SDValue Src1 = Op.getOperand(1);
22615 SDValue Src2 = Op.getOperand(2);
22616 SDValue Src3 = Op.getOperand(3);
22617 SDValue PassThru = Op.getOperand(4);
22618 SDValue Mask = Op.getOperand(5);
22619 SDValue Sae = Op.getOperand(6);
22621 if (isRoundModeCurDirection(Sae))
22622 Opc = IntrData->Opc0;
22623 else if (isRoundModeSAE(Sae))
22624 Opc = IntrData->Opc1;
22628 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
22629 Mask, PassThru, Subtarget, DAG);
22631 case INTR_TYPE_3OP_MASK_SAE: {
22632 SDValue Src1 = Op.getOperand(1);
22633 SDValue Src2 = Op.getOperand(2);
22634 SDValue Src3 = Op.getOperand(3);
22635 SDValue PassThru = Op.getOperand(4);
22636 SDValue Mask = Op.getOperand(5);
22638 unsigned Opc = IntrData->Opc0;
22639 if (IntrData->Opc1 != 0) {
22640 SDValue Sae = Op.getOperand(6);
22641 if (isRoundModeSAE(Sae))
22642 Opc = IntrData->Opc1;
22643 else if (!isRoundModeCurDirection(Sae))
22646 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
22647 Mask, PassThru, Subtarget, DAG);
22650 SDValue Src1 = Op.getOperand(1);
22651 SDValue Src2 = Op.getOperand(2);
22652 SDValue Src3 = Op.getOperand(3);
22654 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
22655 Src3 = DAG.getBitcast(MaskVT, Src3);
22657 // Reverse the operands to match VSELECT order.
22658 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
22661 SDValue Src1 = Op.getOperand(1);
22662 SDValue Src2 = Op.getOperand(2);
22664 // Swap Src1 and Src2 in the node creation
22665 return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
22668 // NOTE: We need to swizzle the operands to pass the multiply operands first.
22670 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22671 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
22673 SDValue Src1 = Op.getOperand(1);
22674 SDValue Imm = Op.getOperand(2);
22675 SDValue Mask = Op.getOperand(3);
22676 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
22677 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
22679 // Need to fill with zeros to ensure the bitcast will produce zeroes
22680 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
22681 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
22682 DAG.getConstant(0, dl, MVT::v8i1),
22683 FPclassMask, DAG.getIntPtrConstant(0, dl));
22684 return DAG.getBitcast(MVT::i8, Ins);
22687 case CMP_MASK_CC: {
22688 MVT MaskVT = Op.getSimpleValueType();
22689 SDValue CC = Op.getOperand(3);
22690 CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
22691 // We specify 2 possible opcodes for intrinsics with rounding modes.
22692 // First, we check if the intrinsic may have non-default rounding mode,
22693 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22694 if (IntrData->Opc1 != 0) {
22695 SDValue Sae = Op.getOperand(4);
22696 if (isRoundModeSAE(Sae))
22697 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
22698 Op.getOperand(2), CC, Sae);
22699 if (!isRoundModeCurDirection(Sae))
22702 // Default rounding mode.
22703 return DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
22704 Op.getOperand(2), CC);
22706 case CMP_MASK_SCALAR_CC: {
22707 SDValue Src1 = Op.getOperand(1);
22708 SDValue Src2 = Op.getOperand(2);
22709 SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
22710 SDValue Mask = Op.getOperand(4);
22713 if (IntrData->Opc1 != 0) {
22714 SDValue Sae = Op.getOperand(5);
22715 if (isRoundModeSAE(Sae))
22716 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
22717 else if (!isRoundModeCurDirection(Sae))
22720 // Default rounding mode.
22721 if (!Cmp.getNode())
22722 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
22724 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
22726 // Need to fill with zeros to ensure the bitcast will produce zeroes
22727 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
22728 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
22729 DAG.getConstant(0, dl, MVT::v8i1),
22730 CmpMask, DAG.getIntPtrConstant(0, dl));
22731 return DAG.getBitcast(MVT::i8, Ins);
22733 case COMI: { // Comparison intrinsics
22734 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
22735 SDValue LHS = Op.getOperand(1);
22736 SDValue RHS = Op.getOperand(2);
22737 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
22738 SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
22741 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
22742 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
22743 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
22744 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
22747 case ISD::SETNE: { // (ZF = 1 or PF = 1)
22748 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
22749 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
22750 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
22753 case ISD::SETGT: // (CF = 0 and ZF = 0)
22754 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
22756 case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
22757 SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
22760 case ISD::SETGE: // CF = 0
22761 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
22763 case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
22764 SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
22767 llvm_unreachable("Unexpected illegal condition!");
22769 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
22771 case COMI_RM: { // Comparison intrinsics with Sae
22772 SDValue LHS = Op.getOperand(1);
22773 SDValue RHS = Op.getOperand(2);
22774 unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
22775 SDValue Sae = Op.getOperand(4);
22778 if (isRoundModeCurDirection(Sae))
22779 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
22780 DAG.getConstant(CondVal, dl, MVT::i8));
22781 else if (isRoundModeSAE(Sae))
22782 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
22783 DAG.getConstant(CondVal, dl, MVT::i8), Sae);
22786 // Need to fill with zeros to ensure the bitcast will produce zeroes
22787 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
22788 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
22789 DAG.getConstant(0, dl, MVT::v16i1),
22790 FCmp, DAG.getIntPtrConstant(0, dl));
22791 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
22792 DAG.getBitcast(MVT::i16, Ins));
22795 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
22796 Op.getOperand(1), Op.getOperand(2), Subtarget,
22798 case COMPRESS_EXPAND_IN_REG: {
22799 SDValue Mask = Op.getOperand(3);
22800 SDValue DataToCompress = Op.getOperand(1);
22801 SDValue PassThru = Op.getOperand(2);
22802 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
22803 return Op.getOperand(1);
22805 // Avoid false dependency.
22806 if (PassThru.isUndef())
22807 PassThru = DAG.getConstant(0, dl, VT);
22809 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
22813 case FIXUPIMM_MASKZ: {
22814 SDValue Src1 = Op.getOperand(1);
22815 SDValue Src2 = Op.getOperand(2);
22816 SDValue Src3 = Op.getOperand(3);
22817 SDValue Imm = Op.getOperand(4);
22818 SDValue Mask = Op.getOperand(5);
22819 SDValue Passthru = (IntrData->Type == FIXUPIMM)
22821 : getZeroVector(VT, Subtarget, DAG, dl);
22823 unsigned Opc = IntrData->Opc0;
22824 if (IntrData->Opc1 != 0) {
22825 SDValue Sae = Op.getOperand(6);
22826 if (isRoundModeSAE(Sae))
22827 Opc = IntrData->Opc1;
22828 else if (!isRoundModeCurDirection(Sae))
22832 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
22834 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
22835 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
22837 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
22840 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
22841 // Clear the upper bits of the rounding immediate so that the legacy
22842 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
22843 SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
22845 DAG.getConstant(0xf, dl, MVT::i32));
22846 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22847 Op.getOperand(1), RoundingMode);
22850 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
22851 // Clear the upper bits of the rounding immediate so that the legacy
22852 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
22853 SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
22855 DAG.getConstant(0xf, dl, MVT::i32));
22856 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22857 Op.getOperand(1), Op.getOperand(2), RoundingMode);
22861 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
22862 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
22865 // If the carry-in is zero, then we should just use ADD/SUB instead of ADC/SBB.
22867 if (isNullConstant(Op.getOperand(1))) {
22868 Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
22871 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
22872 DAG.getConstant(-1, dl, MVT::i8));
22873 Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
22874 Op.getOperand(3), GenCF.getValue(1));
22876 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
22877 SDValue Results[] = { SetCC, Res };
22878 return DAG.getMergeValues(Results, dl);
22880 case CVTPD2PS_MASK:
22881 case CVTPD2DQ_MASK:
22882 case CVTQQ2PS_MASK:
22883 case TRUNCATE_TO_REG: {
22884 SDValue Src = Op.getOperand(1);
22885 SDValue PassThru = Op.getOperand(2);
22886 SDValue Mask = Op.getOperand(3);
22888 if (isAllOnesConstant(Mask))
22889 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
22891 MVT SrcVT = Src.getSimpleValueType();
22892 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
22893 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22894 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
22897 case CVTPS2PH_MASK: {
22898 SDValue Src = Op.getOperand(1);
22899 SDValue Rnd = Op.getOperand(2);
22900 SDValue PassThru = Op.getOperand(3);
22901 SDValue Mask = Op.getOperand(4);
22903 if (isAllOnesConstant(Mask))
22904 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
22906 MVT SrcVT = Src.getSimpleValueType();
22907 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
22908 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22909 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
22913 case CVTNEPS2BF16_MASK: {
22914 SDValue Src = Op.getOperand(1);
22915 SDValue PassThru = Op.getOperand(2);
22916 SDValue Mask = Op.getOperand(3);
22918 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
22919 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
22921 // Break false dependency.
22922 if (PassThru.isUndef())
22923 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
22925 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
22934 default: return SDValue(); // Don't custom lower most intrinsics.
22936 // ptest and testp intrinsics. The intrinsics these come from are designed to
22937 // return an integer value, not just an instruction, so lower them to the ptest
22938 // or testp pattern and a setcc for the result.
22939 case Intrinsic::x86_avx512_ktestc_b:
22940 case Intrinsic::x86_avx512_ktestc_w:
22941 case Intrinsic::x86_avx512_ktestc_d:
22942 case Intrinsic::x86_avx512_ktestc_q:
22943 case Intrinsic::x86_avx512_ktestz_b:
22944 case Intrinsic::x86_avx512_ktestz_w:
22945 case Intrinsic::x86_avx512_ktestz_d:
22946 case Intrinsic::x86_avx512_ktestz_q:
22947 case Intrinsic::x86_sse41_ptestz:
22948 case Intrinsic::x86_sse41_ptestc:
22949 case Intrinsic::x86_sse41_ptestnzc:
22950 case Intrinsic::x86_avx_ptestz_256:
22951 case Intrinsic::x86_avx_ptestc_256:
22952 case Intrinsic::x86_avx_ptestnzc_256:
22953 case Intrinsic::x86_avx_vtestz_ps:
22954 case Intrinsic::x86_avx_vtestc_ps:
22955 case Intrinsic::x86_avx_vtestnzc_ps:
22956 case Intrinsic::x86_avx_vtestz_pd:
22957 case Intrinsic::x86_avx_vtestc_pd:
22958 case Intrinsic::x86_avx_vtestnzc_pd:
22959 case Intrinsic::x86_avx_vtestz_ps_256:
22960 case Intrinsic::x86_avx_vtestc_ps_256:
22961 case Intrinsic::x86_avx_vtestnzc_ps_256:
22962 case Intrinsic::x86_avx_vtestz_pd_256:
22963 case Intrinsic::x86_avx_vtestc_pd_256:
22964 case Intrinsic::x86_avx_vtestnzc_pd_256: {
22965 unsigned TestOpc = X86ISD::PTEST;
22966 X86::CondCode X86CC;
22968 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
22969 case Intrinsic::x86_avx512_ktestc_b:
22970 case Intrinsic::x86_avx512_ktestc_w:
22971 case Intrinsic::x86_avx512_ktestc_d:
22972 case Intrinsic::x86_avx512_ktestc_q:
22974 TestOpc = X86ISD::KTEST;
22975 X86CC = X86::COND_B;
22977 case Intrinsic::x86_avx512_ktestz_b:
22978 case Intrinsic::x86_avx512_ktestz_w:
22979 case Intrinsic::x86_avx512_ktestz_d:
22980 case Intrinsic::x86_avx512_ktestz_q:
22981 TestOpc = X86ISD::KTEST;
22982 X86CC = X86::COND_E;
22984 case Intrinsic::x86_avx_vtestz_ps:
22985 case Intrinsic::x86_avx_vtestz_pd:
22986 case Intrinsic::x86_avx_vtestz_ps_256:
22987 case Intrinsic::x86_avx_vtestz_pd_256:
22988 TestOpc = X86ISD::TESTP;
22990 case Intrinsic::x86_sse41_ptestz:
22991 case Intrinsic::x86_avx_ptestz_256:
22993 X86CC = X86::COND_E;
22995 case Intrinsic::x86_avx_vtestc_ps:
22996 case Intrinsic::x86_avx_vtestc_pd:
22997 case Intrinsic::x86_avx_vtestc_ps_256:
22998 case Intrinsic::x86_avx_vtestc_pd_256:
22999 TestOpc = X86ISD::TESTP;
23001 case Intrinsic::x86_sse41_ptestc:
23002 case Intrinsic::x86_avx_ptestc_256:
23004 X86CC = X86::COND_B;
23006 case Intrinsic::x86_avx_vtestnzc_ps:
23007 case Intrinsic::x86_avx_vtestnzc_pd:
23008 case Intrinsic::x86_avx_vtestnzc_ps_256:
23009 case Intrinsic::x86_avx_vtestnzc_pd_256:
23010 TestOpc = X86ISD::TESTP;
23012 case Intrinsic::x86_sse41_ptestnzc:
23013 case Intrinsic::x86_avx_ptestnzc_256:
23015 X86CC = X86::COND_A;
23019 SDValue LHS = Op.getOperand(1);
23020 SDValue RHS = Op.getOperand(2);
23021 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
23022 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
23023 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
23026 case Intrinsic::x86_sse42_pcmpistria128:
23027 case Intrinsic::x86_sse42_pcmpestria128:
23028 case Intrinsic::x86_sse42_pcmpistric128:
23029 case Intrinsic::x86_sse42_pcmpestric128:
23030 case Intrinsic::x86_sse42_pcmpistrio128:
23031 case Intrinsic::x86_sse42_pcmpestrio128:
23032 case Intrinsic::x86_sse42_pcmpistris128:
23033 case Intrinsic::x86_sse42_pcmpestris128:
23034 case Intrinsic::x86_sse42_pcmpistriz128:
23035 case Intrinsic::x86_sse42_pcmpestriz128: {
23037 X86::CondCode X86CC;
23039 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
23040 case Intrinsic::x86_sse42_pcmpistria128:
23041 Opcode = X86ISD::PCMPISTR;
23042 X86CC = X86::COND_A;
23044 case Intrinsic::x86_sse42_pcmpestria128:
23045 Opcode = X86ISD::PCMPESTR;
23046 X86CC = X86::COND_A;
23048 case Intrinsic::x86_sse42_pcmpistric128:
23049 Opcode = X86ISD::PCMPISTR;
23050 X86CC = X86::COND_B;
23052 case Intrinsic::x86_sse42_pcmpestric128:
23053 Opcode = X86ISD::PCMPESTR;
23054 X86CC = X86::COND_B;
23056 case Intrinsic::x86_sse42_pcmpistrio128:
23057 Opcode = X86ISD::PCMPISTR;
23058 X86CC = X86::COND_O;
23060 case Intrinsic::x86_sse42_pcmpestrio128:
23061 Opcode = X86ISD::PCMPESTR;
23062 X86CC = X86::COND_O;
23064 case Intrinsic::x86_sse42_pcmpistris128:
23065 Opcode = X86ISD::PCMPISTR;
23066 X86CC = X86::COND_S;
23068 case Intrinsic::x86_sse42_pcmpestris128:
23069 Opcode = X86ISD::PCMPESTR;
23070 X86CC = X86::COND_S;
23072 case Intrinsic::x86_sse42_pcmpistriz128:
23073 Opcode = X86ISD::PCMPISTR;
23074 X86CC = X86::COND_E;
23076 case Intrinsic::x86_sse42_pcmpestriz128:
23077 Opcode = X86ISD::PCMPESTR;
23078 X86CC = X86::COND_E;
23081 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23082 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23083 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
23084 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
23085 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
23088 case Intrinsic::x86_sse42_pcmpistri128:
23089 case Intrinsic::x86_sse42_pcmpestri128: {
23091 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
23092 Opcode = X86ISD::PCMPISTR;
23094 Opcode = X86ISD::PCMPESTR;
23096 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23097 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23098 return DAG.getNode(Opcode, dl, VTs, NewOps);
23101 case Intrinsic::x86_sse42_pcmpistrm128:
23102 case Intrinsic::x86_sse42_pcmpestrm128: {
23104 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
23105 Opcode = X86ISD::PCMPISTR;
23107 Opcode = X86ISD::PCMPESTR;
23109 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23110 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23111 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
23114 case Intrinsic::eh_sjlj_lsda: {
23115 MachineFunction &MF = DAG.getMachineFunction();
23116 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23117 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
23118 auto &Context = MF.getMMI().getContext();
23119 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
23120 Twine(MF.getFunctionNumber()));
23121 return DAG.getNode(getGlobalWrapperKind(), dl, VT,
23122 DAG.getMCSymbol(S, PtrVT));
23125 case Intrinsic::x86_seh_lsda: {
23126 // Compute the symbol for the LSDA. We know it'll get emitted later.
23127 MachineFunction &MF = DAG.getMachineFunction();
23128 SDValue Op1 = Op.getOperand(1);
23129 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
23130 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
23131 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
23133 // Generate a simple absolute symbol reference. This intrinsic is only
23134 // supported on 32-bit Windows, which isn't PIC.
23135 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
23136 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
23139 case Intrinsic::eh_recoverfp: {
23140 SDValue FnOp = Op.getOperand(1);
23141 SDValue IncomingFPOp = Op.getOperand(2);
23142 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
23143 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
23145 report_fatal_error(
23146 "llvm.eh.recoverfp must take a function as the first argument");
23147 return recoverFramePointer(DAG, Fn, IncomingFPOp);
23150 case Intrinsic::localaddress: {
23151 // Returns one of the stack, base, or frame pointer registers, depending on
23152 // which is used to reference local variables.
23153 MachineFunction &MF = DAG.getMachineFunction();
23154 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23156 if (RegInfo->hasBasePointer(MF))
23157 Reg = RegInfo->getBaseRegister();
23158 else { // Handles the SP or FP case.
23159 bool CantUseFP = RegInfo->needsStackRealignment(MF);
23161 Reg = RegInfo->getPtrSizedStackRegister(MF);
23163 Reg = RegInfo->getPtrSizedFrameRegister(MF);
23165 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
23168 case Intrinsic::x86_avx512_vp2intersect_q_512:
23169 case Intrinsic::x86_avx512_vp2intersect_q_256:
23170 case Intrinsic::x86_avx512_vp2intersect_q_128:
23171 case Intrinsic::x86_avx512_vp2intersect_d_512:
23172 case Intrinsic::x86_avx512_vp2intersect_d_256:
23173 case Intrinsic::x86_avx512_vp2intersect_d_128: {
23174 MVT MaskVT = Op.getSimpleValueType();
23176 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
23179 SDValue Operation =
23180 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
23181 Op->getOperand(1), Op->getOperand(2));
23183 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
23184 MaskVT, Operation);
23185 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
23186 MaskVT, Operation);
23187 return DAG.getMergeValues({Result0, Result1}, DL);
23192 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23193 SDValue Src, SDValue Mask, SDValue Base,
23194 SDValue Index, SDValue ScaleOp, SDValue Chain,
23195 const X86Subtarget &Subtarget) {
23197 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23198 // Scale must be constant.
23201 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23202 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
23203 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
23204 // If source is undef or we know it won't be used, use a zero vector
23205 // to break register dependency.
23206 // TODO: use undef instead and let BreakFalseDeps deal with it?
23207 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
23208 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
23210 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23212 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
23213 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
23214 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23215 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
23218 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
23219 SDValue Src, SDValue Mask, SDValue Base,
23220 SDValue Index, SDValue ScaleOp, SDValue Chain,
23221 const X86Subtarget &Subtarget) {
23222 MVT VT = Op.getSimpleValueType();
23224 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23225 // Scale must be constant.
23228 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23229 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
23230 VT.getVectorNumElements());
23231 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
23233 // We support two versions of the gather intrinsics. One with scalar mask and
23234 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
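// E.g. for a v4i32 gather the scalar-mask form passes an i8 mask; getMaskNode
// turns it into a v4i1 mask node (in effect using its low 4 bits), so both
// intrinsic flavours reach the same masked-gather node below.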
23235 if (Mask.getValueType() != MaskVT)
23236 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23238 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
23239 // If source is undef or we know it won't be used, use a zero vector
23240 // to break register dependency.
23241 // TODO: use undef instead and let BreakFalseDeps deal with it?
23242 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
23243 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
23245 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23247 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
23248 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
23249 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23250 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
23253 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23254 SDValue Src, SDValue Mask, SDValue Base,
23255 SDValue Index, SDValue ScaleOp, SDValue Chain,
23256 const X86Subtarget &Subtarget) {
23258 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23259 // Scale must be constant.
23262 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23263 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
23264 Src.getSimpleValueType().getVectorNumElements());
23265 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
23267 // We support two versions of the scatter intrinsics. One with scalar mask and
23268 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
23269 if (Mask.getValueType() != MaskVT)
23270 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23272 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23274 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
23275 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
23276 SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
23277 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23278 return Res.getValue(1);
23281 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23282 SDValue Mask, SDValue Base, SDValue Index,
23283 SDValue ScaleOp, SDValue Chain,
23284 const X86Subtarget &Subtarget) {
23286 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23287 // Scale must be constant.
23290 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
23291 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
23292 SDValue Segment = DAG.getRegister(0, MVT::i32);
23294 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
23295 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23296 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
23297 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
23298 return SDValue(Res, 0);
23301 /// Handles the lowering of builtin intrinsics with chain that return their
23302 /// value into registers EDX:EAX.
23303 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
23304 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
23305 /// TargetOpcode.
23306 /// Returns a Glue value which can be used to add an extra copy-from-reg if the
23307 /// expanded intrinsic implicitly defines extra registers (i.e. not just
23308 /// EDX:EAX).
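/// For example, the RDPMC lowering below passes X86::ECX as SrcReg so that the
/// counter index (operand 2 of the intrinsic) is copied into ECX before the
/// RDPMC machine node is emitted.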
23309 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
23311 unsigned TargetOpcode,
23313 const X86Subtarget &Subtarget,
23314 SmallVectorImpl<SDValue> &Results) {
23315 SDValue Chain = N->getOperand(0);
23319 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
23320 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
23321 Glue = Chain.getValue(1);
23324 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
23325 SDValue N1Ops[] = {Chain, Glue};
23326 SDNode *N1 = DAG.getMachineNode(
23327 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
23328 Chain = SDValue(N1, 0);
23330 // Read the result, which the expanded instruction leaves in registers EDX:EAX.
23332 if (Subtarget.is64Bit()) {
23333 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
23334 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
23337 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
23338 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
23341 Chain = HI.getValue(1);
23342 Glue = HI.getValue(2);
23344 if (Subtarget.is64Bit()) {
23345 // Merge the two 32-bit values into a 64-bit one.
23346 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
23347 DAG.getConstant(32, DL, MVT::i8));
23348 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
23349 Results.push_back(Chain);
23353 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
23354 SDValue Ops[] = { LO, HI };
23355 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
23356 Results.push_back(Pair);
23357 Results.push_back(Chain);
23361 /// Handles the lowering of builtin intrinsics that read the time stamp counter
23362 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
23363 /// READCYCLECOUNTER nodes.
23364 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
23366 const X86Subtarget &Subtarget,
23367 SmallVectorImpl<SDValue> &Results) {
23368 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
23369 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
23370 // and the EAX register is loaded with the low-order 32 bits.
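// The helper merges the two halves back into a single 64-bit value, i.e.
//   TSC = ((uint64_t)EDX << 32) | EAX
// (via shift+or on 64-bit targets, or a BUILD_PAIR on 32-bit targets).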
23371 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
23372 /* NoRegister */0, Subtarget,
23374 if (Opcode != X86::RDTSCP)
23377 SDValue Chain = Results[1];
23378 // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
23379 // the ECX register. Add 'ecx' explicitly to the chain.
23380 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
23382 Results.push_back(ecx.getValue(1));
23385 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
23386 SelectionDAG &DAG) {
23387 SmallVector<SDValue, 3> Results;
23389 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
23391 return DAG.getMergeValues(Results, DL);
23394 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
23395 MachineFunction &MF = DAG.getMachineFunction();
23396 SDValue Chain = Op.getOperand(0);
23397 SDValue RegNode = Op.getOperand(2);
23398 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
23400 report_fatal_error("EH registrations only live in functions using WinEH");
23402 // Cast the operand to an alloca, and remember the frame index.
23403 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
23405 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
23406 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
23408 // Return the chain operand without making any DAG nodes.
23412 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
23413 MachineFunction &MF = DAG.getMachineFunction();
23414 SDValue Chain = Op.getOperand(0);
23415 SDValue EHGuard = Op.getOperand(2);
23416 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
23418 report_fatal_error("EHGuard only live in functions using WinEH");
23420 // Cast the operand to an alloca, and remember the frame index.
23421 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
23423 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
23424 EHInfo->EHGuardFrameIndex = FINode->getIndex();
23426 // Return the chain operand without making any DAG nodes.
23430 /// Emit Truncating Store with signed or unsigned saturation.
23432 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
23433 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
23434 SelectionDAG &DAG) {
23436 SDVTList VTs = DAG.getVTList(MVT::Other);
23437 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
23438 SDValue Ops[] = { Chain, Val, Ptr, Undef };
23440 DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
23441 DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
23444 /// Emit Masked Truncating Store with signed or unsigned saturation.
23446 EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
23447 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
23448 MachineMemOperand *MMO, SelectionDAG &DAG) {
23450 SDVTList VTs = DAG.getVTList(MVT::Other);
23451 SDValue Ops[] = { Chain, Val, Ptr, Mask };
23453 DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
23454 DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
23457 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
23458 SelectionDAG &DAG) {
23459 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
23461 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
23464 case llvm::Intrinsic::x86_seh_ehregnode:
23465 return MarkEHRegistrationNode(Op, DAG);
23466 case llvm::Intrinsic::x86_seh_ehguard:
23467 return MarkEHGuard(Op, DAG);
23468 case llvm::Intrinsic::x86_rdpkru: {
23470 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23471 // Create a RDPKRU node and pass 0 to the ECX parameter.
23472 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
23473 DAG.getConstant(0, dl, MVT::i32));
23475 case llvm::Intrinsic::x86_wrpkru: {
23477 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
23478 // to the EDX and ECX parameters.
23479 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
23480 Op.getOperand(0), Op.getOperand(2),
23481 DAG.getConstant(0, dl, MVT::i32),
23482 DAG.getConstant(0, dl, MVT::i32));
23484 case llvm::Intrinsic::x86_flags_read_u32:
23485 case llvm::Intrinsic::x86_flags_read_u64:
23486 case llvm::Intrinsic::x86_flags_write_u32:
23487 case llvm::Intrinsic::x86_flags_write_u64: {
23488 // We need a frame pointer because this will get lowered to a PUSH/POP
23489 // sequence.
23490 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
23491 MFI.setHasCopyImplyingStackAdjustment(true);
23492 // Don't do anything here, we will expand these intrinsics out later
23493 // during FinalizeISel in EmitInstrWithCustomInserter.
23496 case Intrinsic::x86_lwpins32:
23497 case Intrinsic::x86_lwpins64:
23498 case Intrinsic::x86_umwait:
23499 case Intrinsic::x86_tpause: {
23501 SDValue Chain = Op->getOperand(0);
23502 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23506 default: llvm_unreachable("Impossible intrinsic");
23507 case Intrinsic::x86_umwait:
23508 Opcode = X86ISD::UMWAIT;
23510 case Intrinsic::x86_tpause:
23511 Opcode = X86ISD::TPAUSE;
23513 case Intrinsic::x86_lwpins32:
23514 case Intrinsic::x86_lwpins64:
23515 Opcode = X86ISD::LWPINS;
23519 SDValue Operation =
23520 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
23521 Op->getOperand(3), Op->getOperand(4));
23522 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
23523 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
23524 Operation.getValue(1));
23526 case Intrinsic::x86_enqcmd:
23527 case Intrinsic::x86_enqcmds: {
23529 SDValue Chain = Op.getOperand(0);
23530 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23533 default: llvm_unreachable("Impossible intrinsic!");
23534 case Intrinsic::x86_enqcmd:
23535 Opcode = X86ISD::ENQCMD;
23537 case Intrinsic::x86_enqcmds:
23538 Opcode = X86ISD::ENQCMDS;
23541 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
23543 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
23544 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
23545 Operation.getValue(1));
23552 switch(IntrData->Type) {
23553 default: llvm_unreachable("Unknown Intrinsic Type");
23556 // Emit the node with the right value type.
23557 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
23558 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
23560 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
23561 // Otherwise return the value from Rand, which is always 0, cast to i32.
23562 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
23563 DAG.getConstant(1, dl, Op->getValueType(1)),
23564 DAG.getConstant(X86::COND_B, dl, MVT::i8),
23565 SDValue(Result.getNode(), 1) };
23566 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
23568 // Return { result, isValid, chain }.
23569 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
23570 SDValue(Result.getNode(), 2));
23572 case GATHER_AVX2: {
23573 SDValue Chain = Op.getOperand(0);
23574 SDValue Src = Op.getOperand(2);
23575 SDValue Base = Op.getOperand(3);
23576 SDValue Index = Op.getOperand(4);
23577 SDValue Mask = Op.getOperand(5);
23578 SDValue Scale = Op.getOperand(6);
23579 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
23580 Scale, Chain, Subtarget);
23583 // gather(v1, mask, index, base, scale);
23584 SDValue Chain = Op.getOperand(0);
23585 SDValue Src = Op.getOperand(2);
23586 SDValue Base = Op.getOperand(3);
23587 SDValue Index = Op.getOperand(4);
23588 SDValue Mask = Op.getOperand(5);
23589 SDValue Scale = Op.getOperand(6);
23590 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
23594 // scatter(base, mask, index, v1, scale);
23595 SDValue Chain = Op.getOperand(0);
23596 SDValue Base = Op.getOperand(2);
23597 SDValue Mask = Op.getOperand(3);
23598 SDValue Index = Op.getOperand(4);
23599 SDValue Src = Op.getOperand(5);
23600 SDValue Scale = Op.getOperand(6);
23601 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
23602 Scale, Chain, Subtarget);
23605 SDValue Hint = Op.getOperand(6);
23606 unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
23607 assert((HintVal == 2 || HintVal == 3) &&
23608 "Wrong prefetch hint in intrinsic: should be 2 or 3");
23609 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
23610 SDValue Chain = Op.getOperand(0);
23611 SDValue Mask = Op.getOperand(2);
23612 SDValue Index = Op.getOperand(3);
23613 SDValue Base = Op.getOperand(4);
23614 SDValue Scale = Op.getOperand(5);
23615 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
23618 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
23620 SmallVector<SDValue, 2> Results;
23621 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
23623 return DAG.getMergeValues(Results, dl);
23625 // Read Performance Monitoring Counters.
23627 // Get Extended Control Register.
23629 SmallVector<SDValue, 2> Results;
23631 // RDPMC uses ECX to select the index of the performance counter to read.
23632 // XGETBV uses ECX to select the index of the XCR register to return.
23633 // The result is stored into registers EDX:EAX.
23634 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
23635 Subtarget, Results);
23636 return DAG.getMergeValues(Results, dl);
23638 // XTEST intrinsics.
23640 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
23641 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
23643 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
23644 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
23645 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
23646 Ret, SDValue(InTrans.getNode(), 1));
23648 case TRUNCATE_TO_MEM_VI8:
23649 case TRUNCATE_TO_MEM_VI16:
23650 case TRUNCATE_TO_MEM_VI32: {
23651 SDValue Mask = Op.getOperand(4);
23652 SDValue DataToTruncate = Op.getOperand(3);
23653 SDValue Addr = Op.getOperand(2);
23654 SDValue Chain = Op.getOperand(0);
23656 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
23657 assert(MemIntr && "Expected MemIntrinsicSDNode!");
23659 EVT MemVT = MemIntr->getMemoryVT();
23661 uint16_t TruncationOp = IntrData->Opc0;
23662 switch (TruncationOp) {
23663 case X86ISD::VTRUNC: {
23664 if (isAllOnesConstant(Mask)) // return just a truncate store
23665 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
23666 MemIntr->getMemOperand());
23668 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
23669 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23671 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
23672 MemIntr->getMemOperand(), true /* truncating */);
23674 case X86ISD::VTRUNCUS:
23675 case X86ISD::VTRUNCS: {
23676 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
23677 if (isAllOnesConstant(Mask))
23678 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
23679 MemIntr->getMemOperand(), DAG);
23681 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
23682 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23684 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
23685 VMask, MemVT, MemIntr->getMemOperand(), DAG);
23688 llvm_unreachable("Unsupported truncstore intrinsic");
23694 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
23695 SelectionDAG &DAG) const {
23696 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
23697 MFI.setReturnAddressIsTaken(true);
23699 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
23702 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
23704 EVT PtrVT = getPointerTy(DAG.getDataLayout());
23707 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
23708 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23709 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
23710 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
23711 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
23712 MachinePointerInfo());
23715 // Just load the return address.
23716 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
23717 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
23718 MachinePointerInfo());
23721 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
23722 SelectionDAG &DAG) const {
23723 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
23724 return getReturnAddressFrameIndex(DAG);
23727 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
23728 MachineFunction &MF = DAG.getMachineFunction();
23729 MachineFrameInfo &MFI = MF.getFrameInfo();
23730 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23731 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23732 EVT VT = Op.getValueType();
23734 MFI.setFrameAddressIsTaken(true);
23736 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
23737 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
23738 // is not possible to crawl up the stack without looking at the unwind codes
23739 // at the same time.
23740 int FrameAddrIndex = FuncInfo->getFAIndex();
23741 if (!FrameAddrIndex) {
23742 // Set up a frame object for the return address.
23743 unsigned SlotSize = RegInfo->getSlotSize();
23744 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
23745 SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
23746 FuncInfo->setFAIndex(FrameAddrIndex);
23748 return DAG.getFrameIndex(FrameAddrIndex, VT);
23751 unsigned FrameReg =
23752 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
23753 SDLoc dl(Op); // FIXME probably not meaningful
23754 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
23755 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
23756 (FrameReg == X86::EBP && VT == MVT::i32)) &&
23757 "Invalid Frame Register!");
23758 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
23760 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
23761 MachinePointerInfo());
23765 // FIXME? Maybe this could be a TableGen attribute on some registers and
23766 // this table could be generated automatically from RegInfo.
23767 unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
23768 SelectionDAG &DAG) const {
23769 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23770 const MachineFunction &MF = DAG.getMachineFunction();
23772 unsigned Reg = StringSwitch<unsigned>(RegName)
23773 .Case("esp", X86::ESP)
23774 .Case("rsp", X86::RSP)
23775 .Case("ebp", X86::EBP)
23776 .Case("rbp", X86::RBP)
23779 if (Reg == X86::EBP || Reg == X86::RBP) {
23780 if (!TFI.hasFP(MF))
23781 report_fatal_error("register " + StringRef(RegName) +
23782 " is allocatable: function has no frame pointer");
23785 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23786 unsigned FrameReg =
23787 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
23788 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
23789 "Invalid Frame Register!");
23797 report_fatal_error("Invalid register name global variable");
23800 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
23801 SelectionDAG &DAG) const {
23802 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23803 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
23806 unsigned X86TargetLowering::getExceptionPointerRegister(
23807 const Constant *PersonalityFn) const {
23808 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
23809 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
23811 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
23814 unsigned X86TargetLowering::getExceptionSelectorRegister(
23815 const Constant *PersonalityFn) const {
23816 // Funclet personalities don't use selectors (the runtime does the selection).
23817 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
23818 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
23821 bool X86TargetLowering::needsFixedCatchObjects() const {
23822 return Subtarget.isTargetWin64();
23825 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
23826 SDValue Chain = Op.getOperand(0);
23827 SDValue Offset = Op.getOperand(1);
23828 SDValue Handler = Op.getOperand(2);
23831 EVT PtrVT = getPointerTy(DAG.getDataLayout());
23832 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23833 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
23834 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
23835 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
23836 "Invalid Frame Register!");
23837 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
23838 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
23840 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
23841 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
23843 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
23844 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
23845 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
23847 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
23848 DAG.getRegister(StoreAddrReg, PtrVT));
23851 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
23852 SelectionDAG &DAG) const {
23854 // If the subtarget is not 64-bit, we may need the global base reg
23855 // after isel expands the pseudo, i.e., after the CGBR pass has run.
23856 // Therefore, ask for the GlobalBaseReg now, so that the pass
23857 // inserts the code for us in case we need it.
23858 // Otherwise, we will end up in a situation where we will
23859 // reference a virtual register that is not defined!
23860 if (!Subtarget.is64Bit()) {
23861 const X86InstrInfo *TII = Subtarget.getInstrInfo();
23862 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
23864 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
23865 DAG.getVTList(MVT::i32, MVT::Other),
23866 Op.getOperand(0), Op.getOperand(1));
23869 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
23870 SelectionDAG &DAG) const {
23872 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
23873 Op.getOperand(0), Op.getOperand(1));
23876 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
23877 SelectionDAG &DAG) const {
23879 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
23883 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
23884 return Op.getOperand(0);
23887 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
23888 SelectionDAG &DAG) const {
23889 SDValue Root = Op.getOperand(0);
23890 SDValue Trmp = Op.getOperand(1); // trampoline
23891 SDValue FPtr = Op.getOperand(2); // nested function
23892 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
23895 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
23896 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
23898 if (Subtarget.is64Bit()) {
23899 SDValue OutChains[6];
23901 // Large code-model.
23902 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
23903 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
23905 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
23906 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
23908 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
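// Taken together, the stores below assemble (roughly) this 23-byte stub,
// with offsets relative to the start of the trampoline:
//   +0:  49 BB <imm64>   movabsq <nested function>, %r11
//   +10: 49 BA <imm64>   movabsq <nest value>, %r10
//   +20: 49 FF E3        jmpq   *%r11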
23910 // Load the pointer to the nested function into R11.
23911 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
23912 SDValue Addr = Trmp;
23913 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
23914 Addr, MachinePointerInfo(TrmpAddr));
23916 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23917 DAG.getConstant(2, dl, MVT::i64));
23919 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
23920 /* Alignment = */ 2);
23922 // Load the 'nest' parameter value into R10.
23923 // R10 is specified in X86CallingConv.td
23924 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
23925 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23926 DAG.getConstant(10, dl, MVT::i64));
23927 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
23928 Addr, MachinePointerInfo(TrmpAddr, 10));
23930 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23931 DAG.getConstant(12, dl, MVT::i64));
23933 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
23934 /* Alignment = */ 2);
23936 // Jump to the nested function.
23937 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
23938 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23939 DAG.getConstant(20, dl, MVT::i64));
23940 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
23941 Addr, MachinePointerInfo(TrmpAddr, 20));
23943 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
23944 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
23945 DAG.getConstant(22, dl, MVT::i64));
23946 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
23947 Addr, MachinePointerInfo(TrmpAddr, 22));
23949 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
23951 const Function *Func =
23952 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
23953 CallingConv::ID CC = Func->getCallingConv();
23958 llvm_unreachable("Unsupported calling convention");
23959 case CallingConv::C:
23960 case CallingConv::X86_StdCall: {
23961 // Pass 'nest' parameter in ECX.
23962 // Must be kept in sync with X86CallingConv.td
23963 NestReg = X86::ECX;
23965 // Check that ECX wasn't needed by an 'inreg' parameter.
23966 FunctionType *FTy = Func->getFunctionType();
23967 const AttributeList &Attrs = Func->getAttributes();
23969 if (!Attrs.isEmpty() && !Func->isVarArg()) {
23970 unsigned InRegCount = 0;
23973 for (FunctionType::param_iterator I = FTy->param_begin(),
23974 E = FTy->param_end(); I != E; ++I, ++Idx)
23975 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
23976 auto &DL = DAG.getDataLayout();
23977 // FIXME: should only count parameters that are lowered to integers.
23978 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
23981 if (InRegCount > 2) {
23982 report_fatal_error("Nest register in use - reduce number of inreg"
23988 case CallingConv::X86_FastCall:
23989 case CallingConv::X86_ThisCall:
23990 case CallingConv::Fast:
23991 // Pass 'nest' parameter in EAX.
23992 // Must be kept in sync with X86CallingConv.td
23993 NestReg = X86::EAX;
23997 SDValue OutChains[4];
23998 SDValue Addr, Disp;
24000 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24001 DAG.getConstant(10, dl, MVT::i32));
24002 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
24004 // This is storing the opcode for MOV32ri.
24005 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
24006 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
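// The resulting 32-bit trampoline is a 10-byte stub:
//   +0: B8+reg <imm32>   movl <nest value>, %ecx or %eax
//   +5: E9 <rel32>       jmp  <nested function>  (rel32 measured from offset +10)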
24008 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
24009 Trmp, MachinePointerInfo(TrmpAddr));
24011 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24012 DAG.getConstant(1, dl, MVT::i32));
24014 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
24015 /* Alignment = */ 1);
24017 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
24018 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24019 DAG.getConstant(5, dl, MVT::i32));
24020 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
24021 Addr, MachinePointerInfo(TrmpAddr, 5),
24022 /* Alignment = */ 1);
24024 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24025 DAG.getConstant(6, dl, MVT::i32));
24027 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
24028 /* Alignment = */ 1);
24030 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
24034 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
24035 SelectionDAG &DAG) const {
24036 /*
24037 The rounding mode is in bits 11:10 of FPSR, and has the following
24038 settings:
24039 00 Round to nearest
24040 01 Round to -inf
24041 10 Round to +inf
24042 11 Round to 0
24044 FLT_ROUNDS, on the other hand, expects the following:
24045 -1 Undefined
24046 0 Round to 0
24047 1 Round to nearest
24048 2 Round to +inf
24049 3 Round to -inf
24051 To perform the conversion, we do:
24052 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
24053 */
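// Worked example: RC bits 11:10 = 01 (x87 round toward -inf) gives
// ((0 | 2) + 1) & 3 = 3, FLT_ROUNDS' encoding for -inf; RC = 11 (round
// toward zero) gives (3 + 1) & 3 = 0, the encoding for round-to-zero.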
24055 MachineFunction &MF = DAG.getMachineFunction();
24056 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24057 unsigned StackAlignment = TFI.getStackAlignment();
24058 MVT VT = Op.getSimpleValueType();
24061 // Save FP Control Word to stack slot
24062 int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
24063 SDValue StackSlot =
24064 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
24066 MachineMemOperand *MMO =
24067 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
24068 MachineMemOperand::MOStore, 2, 2);
24070 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
24071 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
24072 DAG.getVTList(MVT::Other),
24073 Ops, MVT::i16, MMO);
24075 // Load FP Control Word from stack slot
24077 DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
24079 // Transform as necessary
24081 DAG.getNode(ISD::SRL, DL, MVT::i16,
24082 DAG.getNode(ISD::AND, DL, MVT::i16,
24083 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
24084 DAG.getConstant(11, DL, MVT::i8));
24086 DAG.getNode(ISD::SRL, DL, MVT::i16,
24087 DAG.getNode(ISD::AND, DL, MVT::i16,
24088 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
24089 DAG.getConstant(9, DL, MVT::i8));
24092 DAG.getNode(ISD::AND, DL, MVT::i16,
24093 DAG.getNode(ISD::ADD, DL, MVT::i16,
24094 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
24095 DAG.getConstant(1, DL, MVT::i16)),
24096 DAG.getConstant(3, DL, MVT::i16));
24098 return DAG.getNode((VT.getSizeInBits() < 16 ?
24099 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
24102 // Split a unary integer op into 2 half-sized ops.
24103 static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
24104 MVT VT = Op.getSimpleValueType();
24105 unsigned NumElems = VT.getVectorNumElements();
24106 unsigned SizeInBits = VT.getSizeInBits();
24107 MVT EltVT = VT.getVectorElementType();
24108 SDValue Src = Op.getOperand(0);
24109 assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
24110 "Src and Op should have the same element type!");
24112 // Extract the Lo/Hi vectors
24114 SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
24115 SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
24117 MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
24118 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24119 DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
24120 DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
24123 // Decompose 256-bit ops into smaller 128-bit ops.
24124 static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
24125 assert(Op.getSimpleValueType().is256BitVector() &&
24126 Op.getSimpleValueType().isInteger() &&
24127 "Only handle AVX 256-bit vector integer operation");
24128 return LowerVectorIntUnary(Op, DAG);
24131 // Decompose 512-bit ops into smaller 256-bit ops.
24132 static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
24133 assert(Op.getSimpleValueType().is512BitVector() &&
24134 Op.getSimpleValueType().isInteger() &&
24135 "Only handle AVX 512-bit vector integer operation");
24136 return LowerVectorIntUnary(Op, DAG);
24139 /// Lower a vector CTLZ using a natively supported vector CTLZ instruction.
24141 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
24142 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
24143 // split the vector, perform the operation on its Lo and Hi parts and
24144 // concatenate the results.
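// E.g. for v16i8 x: zero-extend x to v16i32, run vplzcntd, truncate back to
// v16i8, then subtract 32 - 8 = 24 to discard the leading zeros contributed
// by the zero-extension.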
24145 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
24146 const X86Subtarget &Subtarget) {
24147 assert(Op.getOpcode() == ISD::CTLZ);
24149 MVT VT = Op.getSimpleValueType();
24150 MVT EltVT = VT.getVectorElementType();
24151 unsigned NumElems = VT.getVectorNumElements();
24153 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
24154 "Unsupported element type");
24156 // Split the vector; its Lo and Hi parts will be handled in the next iteration.
24157 if (NumElems > 16 ||
24158 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
24159 return LowerVectorIntUnary(Op, DAG);
24161 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
24162 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
24163 "Unsupported value type for operation");
24165 // Use the natively supported vector instruction vplzcntd.
24166 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
24167 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
24168 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
24169 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
24171 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
24174 // Lower CTLZ using a PSHUFB lookup table implementation.
24175 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
24176 const X86Subtarget &Subtarget,
24177 SelectionDAG &DAG) {
24178 MVT VT = Op.getSimpleValueType();
24179 int NumElts = VT.getVectorNumElements();
24180 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
24181 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
24183 // Per-nibble leading zero PSHUFB lookup table.
24184 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
24185 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
24186 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
24187 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
24189 SmallVector<SDValue, 64> LUTVec;
24190 for (int i = 0; i < NumBytes; ++i)
24191 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
24192 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
24194 // Begin by bitcasting the input to a byte vector, then split those bytes
24195 // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
24196 // If the hi input nibble is zero then we add both results together, otherwise
24197 // we just take the hi result (by masking the lo result to zero before the
24198 // addition).
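// Worked example for the byte 0x1A: hi nibble 0x1 -> LUT[1] = 3, lo nibble
// 0xA -> LUT[10] = 0; the hi nibble is non-zero, so the lo contribution is
// masked off and the result is 3 (ctlz of the 8-bit value 0x1A). For 0x05 the
// hi nibble is zero, so the result is LUT[0] + LUT[5] = 4 + 1 = 5.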
24199 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
24200 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
24202 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
24204 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
24206 if (CurrVT.is512BitVector()) {
24207 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24208 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
24209 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
24211 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
24214 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
24215 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
24216 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
24217 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
24219 // Merge the result from vXi8 back to VT, working on the lo/hi halves
24220 // of the current vector width in the same way we did for the nibbles.
24221 // If the upper half of the input element is zero then add the halves'
24222 // leading zero counts together, otherwise just use the upper half's.
24223 // Double the width of the result until we are at target width.
24224 while (CurrVT != VT) {
24225 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
24226 int CurrNumElts = CurrVT.getVectorNumElements();
24227 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
24228 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
24229 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
24231 // Check if the upper half of the input element is zero.
24232 if (CurrVT.is512BitVector()) {
24233 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24234 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
24235 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24236 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
24238 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
24239 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24241 HiZ = DAG.getBitcast(NextVT, HiZ);
24243 // Move the upper/lower halves to the lower bits as we'll be extending to
24244 // NextVT. Mask the lower result to zero if the upper half was non-zero, and
24245 // add the results together.
24246 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
24247 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
24248 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
24249 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
24250 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
24257 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
24258 const X86Subtarget &Subtarget,
24259 SelectionDAG &DAG) {
24260 MVT VT = Op.getSimpleValueType();
24262 if (Subtarget.hasCDI() &&
24263 // vXi8 vectors need to be promoted to 512-bits for vXi32.
24264 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
24265 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
24267 // Decompose 256-bit ops into smaller 128-bit ops.
24268 if (VT.is256BitVector() && !Subtarget.hasInt256())
24269 return Lower256IntUnary(Op, DAG);
24271 // Decompose 512-bit ops into smaller 256-bit ops.
24272 if (VT.is512BitVector() && !Subtarget.hasBWI())
24273 return Lower512IntUnary(Op, DAG);
24275 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
24276 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
24279 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
24280 SelectionDAG &DAG) {
24281 MVT VT = Op.getSimpleValueType();
24283 unsigned NumBits = VT.getSizeInBits();
24285 unsigned Opc = Op.getOpcode();
24288 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
24290 Op = Op.getOperand(0);
24291 if (VT == MVT::i8) {
24292 // Zero extend to i32 since there is no i8 bsr.
24294 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
24297 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
24298 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
24299 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
24301 if (Opc == ISD::CTLZ) {
24302 // If src is zero (i.e. bsr sets ZF), returns NumBits.
24305 DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
24306 DAG.getConstant(X86::COND_E, dl, MVT::i8),
24309 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
24312 // Finally xor with NumBits-1.
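// E.g. for a 32-bit value with only bit 16 set: BSR returns 16 and
// 31 ^ 16 = 15, the number of leading zeros.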
24313 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
24314 DAG.getConstant(NumBits - 1, dl, OpVT));
24317 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
24321 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
24322 SelectionDAG &DAG) {
24323 MVT VT = Op.getSimpleValueType();
24324 unsigned NumBits = VT.getScalarSizeInBits();
24325 SDValue N0 = Op.getOperand(0);
24328 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
24329 "Only scalar CTTZ requires custom lowering");
24331 // Issue a bsf (scan bits forward) which also sets EFLAGS.
24332 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
24333 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
24335 // If src is zero (i.e. bsf sets ZF), returns NumBits.
24338 DAG.getConstant(NumBits, dl, VT),
24339 DAG.getConstant(X86::COND_E, dl, MVT::i8),
24342 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
24345 /// Break a 256-bit integer operation into two new 128-bit ones and then
24346 /// concatenate the result back.
24347 static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
24348 MVT VT = Op.getSimpleValueType();
24350 assert(VT.is256BitVector() && VT.isInteger() &&
24351 "Unsupported value type for operation");
24353 unsigned NumElems = VT.getVectorNumElements();
24356 // Extract the LHS vectors
24357 SDValue LHS = Op.getOperand(0);
24358 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
24359 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
24361 // Extract the RHS vectors
24362 SDValue RHS = Op.getOperand(1);
24363 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
24364 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
24366 MVT EltVT = VT.getVectorElementType();
24367 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24369 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24370 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24371 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24374 /// Break a 512-bit integer operation into two new 256-bit ones and then
24375 /// concatenate the result back.
24376 static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
24377 MVT VT = Op.getSimpleValueType();
24379 assert(VT.is512BitVector() && VT.isInteger() &&
24380 "Unsupported value type for operation");
24382 unsigned NumElems = VT.getVectorNumElements();
24385 // Extract the LHS vectors
24386 SDValue LHS = Op.getOperand(0);
24387 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
24388 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
24390 // Extract the RHS vectors
24391 SDValue RHS = Op.getOperand(1);
24392 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
24393 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
24395 MVT EltVT = VT.getVectorElementType();
24396 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24398 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24399 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24400 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24403 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
24404 const X86Subtarget &Subtarget) {
24405 MVT VT = Op.getSimpleValueType();
24406 if (VT == MVT::i16 || VT == MVT::i32)
24407 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
24409 if (VT.getScalarType() == MVT::i1)
24410 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
24411 Op.getOperand(0), Op.getOperand(1));
24413 assert(Op.getSimpleValueType().is256BitVector() &&
24414 Op.getSimpleValueType().isInteger() &&
24415 "Only handle AVX 256-bit vector integer operation");
24416 return split256IntArith(Op, DAG);
24419 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
24420 const X86Subtarget &Subtarget) {
24421 MVT VT = Op.getSimpleValueType();
24422 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
24423 unsigned Opcode = Op.getOpcode();
24424 if (VT.getScalarType() == MVT::i1) {
24427 default: llvm_unreachable("Expected saturated arithmetic opcode");
24430 // *addsat i1 X, Y --> X | Y
24431 return DAG.getNode(ISD::OR, dl, VT, X, Y);
24434 // *subsat i1 X, Y --> X & ~Y
24435 return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
24439 if (VT.is128BitVector()) {
24440 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
24441 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24442 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
24443 *DAG.getContext(), VT);
24445 if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
24446 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
24447 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
24448 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
24449 return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
24451 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
24452 // usubsat X, Y --> (X >u Y) ? X - Y : 0
24453 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
24454 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
24455 return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
24457 // Use default expansion.
24461 assert(Op.getSimpleValueType().is256BitVector() &&
24462 Op.getSimpleValueType().isInteger() &&
24463 "Only handle AVX 256-bit vector integer operation");
24464 return split256IntArith(Op, DAG);
24467 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
24468 SelectionDAG &DAG) {
24469 MVT VT = Op.getSimpleValueType();
24470 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
24471 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24472 // 8-bit integer abs to NEG and CMOV.
24474 SDValue N0 = Op.getOperand(0);
24475 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24476 DAG.getConstant(0, DL, VT), N0);
24477 SDValue Ops[] = {N0, Neg, DAG.getConstant(X86::COND_GE, DL, MVT::i8),
24478 SDValue(Neg.getNode(), 1)};
24479 return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
24482 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
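// X86ISD::BLENDV selects per lane on the sign bit of its first operand (here
// X itself), so negative lanes take 0-X and non-negative lanes keep X; this
// yields |X| without needing a 64-bit vector compare.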
24483 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
24485 SDValue Src = Op.getOperand(0);
24487 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
24488 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
24491 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
24492 assert(VT.isInteger() &&
24493 "Only handle AVX 256-bit vector integer operation");
24494 return Lower256IntUnary(Op, DAG);
24497 // Default to expand.
24501 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
24502 MVT VT = Op.getSimpleValueType();
24504 // For AVX1 cases, split to use legal ops (everything but v4i64).
24505 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
24506 return split256IntArith(Op, DAG);
24509 unsigned Opcode = Op.getOpcode();
24510 SDValue N0 = Op.getOperand(0);
24511 SDValue N1 = Op.getOperand(1);
24513 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
24514 // using the SMIN/SMAX instructions and flipping the signbit back.
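// E.g. umin(0x0001, 0xFFFF): after xor with 0x8000 the operands become
// 0x8001 (-32767) and 0x7FFF (32767); smin picks 0x8001, and flipping the
// sign bit back yields 0x0001, the correct unsigned minimum.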
24515 if (VT == MVT::v8i16) {
24516 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
24517 "Unexpected MIN/MAX opcode");
24518 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
24519 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
24520 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
24521 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
24522 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
24523 return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
24526 // Else, expand to a compare/select.
24529 case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
24530 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
24531 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
24532 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
24533 default: llvm_unreachable("Unknown MINMAX opcode");
24536 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
24537 return DAG.getSelect(DL, VT, Cond, N0, N1);
24540 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
24541 SelectionDAG &DAG) {
24543 MVT VT = Op.getSimpleValueType();
24545 if (VT.getScalarType() == MVT::i1)
24546 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
24548 // Decompose 256-bit ops into 128-bit ops.
24549 if (VT.is256BitVector() && !Subtarget.hasInt256())
24550 return split256IntArith(Op, DAG);
24552 SDValue A = Op.getOperand(0);
24553 SDValue B = Op.getOperand(1);
24555 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
24556 // vector pairs, multiply and truncate.
24557 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
24558 unsigned NumElts = VT.getVectorNumElements();
24560 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
24561 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
24562 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
24563 return DAG.getNode(
24564 ISD::TRUNCATE, dl, VT,
24565 DAG.getNode(ISD::MUL, dl, ExVT,
24566 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
24567 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
24570 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
24572 // Extract the lo/hi parts and any-extend to i16.
24573 // We're going to mask off the low byte of each result element of the
24574 // pmullw, so it doesn't matter what's in the high byte of each 16-bit
24576 SDValue Undef = DAG.getUNDEF(VT);
24577 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
24578 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
24581 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
24582 // If the RHS is a constant, manually unpackl/unpackh.
24583 SmallVector<SDValue, 16> LoOps, HiOps;
24584 for (unsigned i = 0; i != NumElts; i += 16) {
24585 for (unsigned j = 0; j != 8; ++j) {
24586 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
24588 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
24593 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
24594 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
24596 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
24597 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
24600 // Multiply, mask the lower 8 bits of the lo/hi results and pack.
24601 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
24602 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
24603 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
24604 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
24605 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
24608 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
24609 if (VT == MVT::v4i32) {
24610 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
24611 "Should not custom lower when pmulld is available!");
24613 // Extract the odd parts.
24614 static const int UnpackMask[] = { 1, -1, 3, -1 };
24615 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
24616 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
24618 // Multiply the even parts.
24619 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
24620 DAG.getBitcast(MVT::v2i64, A),
24621 DAG.getBitcast(MVT::v2i64, B));
24622 // Now multiply odd parts.
24623 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
24624 DAG.getBitcast(MVT::v2i64, Aodds),
24625 DAG.getBitcast(MVT::v2i64, Bodds));
24627 Evens = DAG.getBitcast(VT, Evens);
24628 Odds = DAG.getBitcast(VT, Odds);
24630 // Merge the two vectors back together with a shuffle. This expands into 2
24632 static const int ShufMask[] = { 0, 4, 2, 6 };
24633 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
24636 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
24637 "Only know how to lower V2I64/V4I64/V8I64 multiply");
24638 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
24640 // Ahi = psrlqi(a, 32);
24641 // Bhi = psrlqi(b, 32);
24643 // AloBlo = pmuludq(a, b);
24644 // AloBhi = pmuludq(a, Bhi);
24645 // AhiBlo = pmuludq(Ahi, b);
24647 // Hi = psllqi(AloBhi + AhiBlo, 32);
24648 // return AloBlo + Hi;
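// This is schoolbook multiplication on 32-bit halves:
//   a * b = AloBlo + ((AloBhi + AhiBlo) << 32)   (mod 2^64)
// The Ahi*Bhi term is shifted out of the low 64 bits entirely, and PMULUDQ
// only reads the low 32 bits of each operand, so three multiplies suffice.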
24649 KnownBits AKnown = DAG.computeKnownBits(A);
24650 KnownBits BKnown = DAG.computeKnownBits(B);
24652 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
24653 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
24654 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
24656 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
24657 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
24658 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
24660 SDValue Zero = DAG.getConstant(0, dl, VT);
24662 // Only multiply lo/hi halves that aren't known to be zero.
24663 SDValue AloBlo = Zero;
24664 if (!ALoIsZero && !BLoIsZero)
24665 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
24667 SDValue AloBhi = Zero;
24668 if (!ALoIsZero && !BHiIsZero) {
24669 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
24670 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
24673 SDValue AhiBlo = Zero;
24674 if (!AHiIsZero && !BLoIsZero) {
24675 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
24676 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
24679 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
24680 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
24682 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
24685 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
24686 SelectionDAG &DAG) {
24688 MVT VT = Op.getSimpleValueType();
24689 bool IsSigned = Op->getOpcode() == ISD::MULHS;
24690 unsigned NumElts = VT.getVectorNumElements();
24691 SDValue A = Op.getOperand(0);
24692 SDValue B = Op.getOperand(1);
24694 // Decompose 256-bit ops into 128-bit ops.
24695 if (VT.is256BitVector() && !Subtarget.hasInt256())
24696 return split256IntArith(Op, DAG);
24698 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
24699 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
24700 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
24701 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
24703 // PMULxD operations multiply each even value (starting at 0) of LHS with
24704 // the related value of RHS and produce a widened result.
24705 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
24706 // => <2 x i64> <ae|cg>
24708 // In other words, to have all the results, we need to perform two PMULxD:
24709 // 1. one with the even values.
24710 // 2. one with the odd values.
24711 // To achieve #2, we need to place the odd values at an even position.
24713 // Place the odd value at an even position (basically, shift all values 1
24714 // step to the left):
24715 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
24716 9, -1, 11, -1, 13, -1, 15, -1};
24717 // <a|b|c|d> => <b|undef|d|undef>
24718 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
24719 makeArrayRef(&Mask[0], NumElts));
24720 // <e|f|g|h> => <f|undef|h|undef>
24721 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
24722 makeArrayRef(&Mask[0], NumElts));
24724 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
24726 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
24728 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
24729 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
24730 // => <2 x i64> <ae|cg>
24731 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
24732 DAG.getBitcast(MulVT, A),
24733 DAG.getBitcast(MulVT, B)));
24734 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
24735 // => <2 x i64> <bf|dh>
24736 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
24737 DAG.getBitcast(MulVT, Odd0),
24738 DAG.getBitcast(MulVT, Odd1)));
24740 // Shuffle it back into the right order.
24741 SmallVector<int, 16> ShufMask(NumElts);
24742 for (int i = 0; i != (int)NumElts; ++i)
24743 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
24745 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
24747 // If we have a signed multiply but no PMULDQ, fix up the result of an
24748 // unsigned multiply.
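// This uses the standard identity (N = element bit width):
//   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)  (mod 2^N)
// since reinterpreting a negative value as unsigned adds 2^N to it.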
24749 if (IsSigned && !Subtarget.hasSSE41()) {
24750 SDValue Zero = DAG.getConstant(0, dl, VT);
24751 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
24752 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
24753 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
24754 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
24756 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
24757 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
24763 // Only i8 vectors should need custom lowering after this.
24764 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
24765 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
24766 "Unsupported vector type");
24768 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
24769 // logical shift down the upper half and pack back to i8.
24771 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
24772 // and then ashr/lshr the upper bits down to the lower bits before multiply.
24773 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
24775 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
24776 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
24777 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
24778 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
24779 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
24780 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
24781 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
24782 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
24785 // For signed 512-bit vectors, split into 256-bit vectors to allow the
24786 // sign-extension to occur.
24787 if (VT == MVT::v64i8 && IsSigned)
24788 return split512IntArith(Op, DAG);
24790 // Signed AVX2 implementation - extend xmm subvectors to ymm.
24791 if (VT == MVT::v32i8 && IsSigned) {
24792 MVT ExVT = MVT::v16i16;
24793 SDValue ALo = extract128BitVector(A, 0, DAG, dl);
24794 SDValue BLo = extract128BitVector(B, 0, DAG, dl);
24795 SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
24796 SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
24797 ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
24798 BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
24799 AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
24800 BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
24801 SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
24802 SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
24803 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
24804 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
24806 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
24807 // Shuffle lowering should turn this into PACKUS+PERMQ
24808 Lo = DAG.getBitcast(VT, Lo);
24809 Hi = DAG.getBitcast(VT, Hi);
24810 return DAG.getVectorShuffle(VT, dl, Lo, Hi,
24811 { 0, 2, 4, 6, 8, 10, 12, 14,
24812 16, 18, 20, 22, 24, 26, 28, 30,
24813 32, 34, 36, 38, 40, 42, 44, 46,
24814 48, 50, 52, 54, 56, 58, 60, 62});
24817 // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
24818 // half of each 128-bit lane to widen to a vXi16 type. Do the multiplies,
24819 // shift the results and pack the half lane results back together.
24821 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
24823 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
24824 -1, -1, -1, -1, -1, -1, -1, -1};
24826 // Extract the lo parts and zero/sign extend to i16.
24827 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
24828 // shifts to sign extend. Using unpack for unsigned only requires an xor to
24829 // create zeros and a copy due to tied register constraints pre-AVX. But using
24830 // zero_extend_vector_inreg would require an additional pshufd for the high
24834 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
24835 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
24837 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
24838 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
24839 } else if (IsSigned) {
24840 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
24841 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
24843 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
24844 AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
24846 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
24847 DAG.getConstant(0, dl, VT)));
24848 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
24849 DAG.getConstant(0, dl, VT)));
24853 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
24854 // If the RHS is a constant, manually unpackl/unpackh and extend.
24855 SmallVector<SDValue, 16> LoOps, HiOps;
24856 for (unsigned i = 0; i != NumElts; i += 16) {
24857 for (unsigned j = 0; j != 8; ++j) {
24858 SDValue LoOp = B.getOperand(i + j);
24859 SDValue HiOp = B.getOperand(i + j + 8);
24862 LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
24863 HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
24865 LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
24866 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
24869 LoOps.push_back(LoOp);
24870 HiOps.push_back(HiOp);
24874 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
24875 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
24876 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
24877 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
24879 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
24880 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
24881 } else if (IsSigned) {
24882 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
24883 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
24885 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
24886 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
24888 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
24889 DAG.getConstant(0, dl, VT)));
24890 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
24891 DAG.getConstant(0, dl, VT)));
24894 // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results and
24895 // pack back to vXi8.
24896 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
24897 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
24898 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
24899 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
24901 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
24902 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
24905 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
24906 assert(Subtarget.isTargetWin64() && "Unexpected target");
24907 EVT VT = Op.getValueType();
24908 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
24909 "Unexpected return type for lowering");
24913 switch (Op->getOpcode()) {
24914 default: llvm_unreachable("Unexpected request for libcall!");
24915 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
24916 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
24917 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
24918 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
24919 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
24920 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
24924 SDValue InChain = DAG.getEntryNode();
24926 TargetLowering::ArgListTy Args;
24927 TargetLowering::ArgListEntry Entry;
24928 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
24929 EVT ArgVT = Op->getOperand(i).getValueType();
24930 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
24931 "Unexpected argument type for lowering");
24932 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
24933 Entry.Node = StackPtr;
24934 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
24935 MachinePointerInfo(), /* Alignment = */ 16);
24936 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
24937 Entry.Ty = PointerType::get(ArgTy,0);
24938 Entry.IsSExt = false;
24939 Entry.IsZExt = false;
24940 Args.push_back(Entry);
24943 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
24944 getPointerTy(DAG.getDataLayout()));
24946 TargetLowering::CallLoweringInfo CLI(DAG);
24947 CLI.setDebugLoc(dl)
24950 getLibcallCallingConv(LC),
24951 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
24954 .setSExtResult(isSigned)
24955 .setZExtResult(!isSigned);
24957 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
24958 return DAG.getBitcast(VT, CallInfo.first);
24961 // Return true if the required (according to Opcode) shift-imm form is natively
24962 // supported by the Subtarget
24963 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
24965 if (VT.getScalarSizeInBits() < 16)
24968 if (VT.is512BitVector() && Subtarget.hasAVX512() &&
24969 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
24972 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
24973 (VT.is256BitVector() && Subtarget.hasInt256());
24975 bool AShift = LShift && (Subtarget.hasAVX512() ||
24976 (VT != MVT::v2i64 && VT != MVT::v4i64));
24977 return (Opcode == ISD::SRA) ? AShift : LShift;
24980 // The shift amount is a variable, but it is the same for all vector lanes.
24981 // These instructions are defined together with shift-immediate.
24983 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
24985 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
24988 // Return true if the required (according to Opcode) variable-shift form is
24989 // natively supported by the Subtarget
24990 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
24993 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
24996 // vXi16 supported only on AVX-512, BWI
24997 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
25000 if (Subtarget.hasAVX512())
25003 bool LShift = VT.is128BitVector() || VT.is256BitVector();
25004 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
25005 return (Opcode == ISD::SRA) ? AShift : LShift;
25008 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
25009 const X86Subtarget &Subtarget) {
25010 MVT VT = Op.getSimpleValueType();
25012 SDValue R = Op.getOperand(0);
25013 SDValue Amt = Op.getOperand(1);
25014 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
25016 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
25017 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
25018 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
25019 SDValue Ex = DAG.getBitcast(ExVT, R);
25021 // ashr(R, 63) === cmp_slt(R, 0)
25022 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
25023 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
25024 "Unsupported PCMPGT op");
25025 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
25028 if (ShiftAmt >= 32) {
25029 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
25031 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
25032 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
25033 ShiftAmt - 32, DAG);
25034 if (VT == MVT::v2i64)
25035 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
25036 if (VT == MVT::v4i64)
25037 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25038 {9, 1, 11, 3, 13, 5, 15, 7});
25040 // SRA upper i32, SRL whole i64 and select lower i32.
25041 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
25044 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
25045 Lower = DAG.getBitcast(ExVT, Lower);
25046 if (VT == MVT::v2i64)
25047 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
25048 if (VT == MVT::v4i64)
25049 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25050 {8, 1, 10, 3, 12, 5, 14, 7});
25052 return DAG.getBitcast(VT, Ex);
25055 // Optimize shl/srl/sra with constant shift amount.
25056 APInt APIntShiftAmt;
25057 if (!isConstantSplat(Amt, APIntShiftAmt))
25060 // If the shift amount is out of range, return undef.
25061 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
25062 return DAG.getUNDEF(VT);
25064 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
25066 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
25067 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
25069 // i64 SRA needs to be performed as partial shifts.
25070 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
25071 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
25072 Op.getOpcode() == ISD::SRA)
25073 return ArithmeticShiftRight64(ShiftAmt);
25075 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
25076 VT == MVT::v64i8) {
25077 unsigned NumElts = VT.getVectorNumElements();
25078 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25080 // Simple i8 add case
25081 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
25082 return DAG.getNode(ISD::ADD, dl, VT, R, R);
25084 // ashr(R, 7) === cmp_slt(R, 0)
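// An arithmetic shift right by 7 broadcasts the sign bit, giving 0 or -1 per
// i8 lane - exactly what PCMPGT(0, R) (or the SETGT below) produces.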
25085 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
25086 SDValue Zeros = DAG.getConstant(0, dl, VT);
25087 if (VT.is512BitVector()) {
25088 assert(VT == MVT::v64i8 && "Unexpected element type!");
25089 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
25090 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
25092 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
25095 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
25096 if (VT == MVT::v16i8 && Subtarget.hasXOP())
25099 if (Op.getOpcode() == ISD::SHL) {
25100 // Make a large shift.
25101 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
25103 SHL = DAG.getBitcast(VT, SHL);
25104 // Zero out the rightmost bits.
25105 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
25106 return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
25108 if (Op.getOpcode() == ISD::SRL) {
25109 // Make a large shift.
25110 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
25112 SRL = DAG.getBitcast(VT, SRL);
25113 // Zero out the leftmost bits.
25114 return DAG.getNode(ISD::AND, dl, VT, SRL,
25115 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
25117 if (Op.getOpcode() == ISD::SRA) {
25118 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
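// This is the usual sign-extend-from-bit trick: Mask holds only the
// shifted-down sign bit, so (v ^ Mask) - Mask sign-extends v from that bit.
// E.g. ShiftAmt = 2 on lane 0xF0 (-16): lshr = 0x3C, Mask = 0x20,
// 0x3C ^ 0x20 = 0x1C, 0x1C - 0x20 = 0xFC (-4) == -16 >> 2.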
25119 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25121 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
25122 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
25123 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
25126 llvm_unreachable("Unknown shift opcode.");
25132 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
25133 const X86Subtarget &Subtarget) {
25134 MVT VT = Op.getSimpleValueType();
25136 SDValue R = Op.getOperand(0);
25137 SDValue Amt = Op.getOperand(1);
25138 unsigned Opcode = Op.getOpcode();
25139 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
25140 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
25142 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
25143 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
25144 MVT EltVT = VT.getVectorElementType();
25145 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
25146 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
25147 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
25148 else if (EltVT.bitsLT(MVT::i32))
25149 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25151 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
25154 // vXi8 shifts - shift as v8i16 + mask result.
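// i.e. perform the uniform shift on the vector reinterpreted as vXi16, then
// AND with a mask (an all-ones vector shifted the same way and splatted per
// byte) to clear the bits that leaked across byte boundaries.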
25155 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
25156 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
25157 VT == MVT::v64i8) &&
25158 !Subtarget.hasXOP()) {
25159 unsigned NumElts = VT.getVectorNumElements();
25160 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25161 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
25162 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
25163 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
25164 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25166 // Create the mask using vXi16 shifts. For shift-rights we need to move
25167 // the upper byte down before splatting the vXi8 mask.
25168 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
25169 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
25170 BaseShAmt, Subtarget, DAG);
25171 if (Opcode != ISD::SHL)
25172 BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
25174 BitMask = DAG.getBitcast(VT, BitMask);
25175 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
25176 SmallVector<int, 64>(NumElts, 0));
25178 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
25179 DAG.getBitcast(ExtVT, R), BaseShAmt,
25181 Res = DAG.getBitcast(VT, Res);
25182 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
25184 if (Opcode == ISD::SRA) {
25185 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
25186 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
25187 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
25188 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
25189 BaseShAmt, Subtarget, DAG);
25190 SignMask = DAG.getBitcast(VT, SignMask);
25191 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
25192 Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
25199 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
25200 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
25201 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
25202 Amt = Amt.getOperand(0);
25203 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
25204 std::vector<SDValue> Vals(Ratio);
25205 for (unsigned i = 0; i != Ratio; ++i)
25206 Vals[i] = Amt.getOperand(i);
25207 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
25208 for (unsigned j = 0; j != Ratio; ++j)
25209 if (Vals[j] != Amt.getOperand(i + j))
25213 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
25214 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
25219 // Convert a shift/rotate left amount to a multiplication scale factor.
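// i.e. build a vector of per-lane powers of two so that a single vector
// multiply (x * (1 << Amt)) can stand in for a per-lane variable shift left.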
25220 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
25221 const X86Subtarget &Subtarget,
25222 SelectionDAG &DAG) {
25223 MVT VT = Amt.getSimpleValueType();
25224 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
25225 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
25226 (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
25229 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
25230 SmallVector<SDValue, 8> Elts;
25231 MVT SVT = VT.getVectorElementType();
25232 unsigned SVTBits = SVT.getSizeInBits();
25233 APInt One(SVTBits, 1);
25234 unsigned NumElems = VT.getVectorNumElements();
25236 for (unsigned i = 0; i != NumElems; ++i) {
25237 SDValue Op = Amt->getOperand(i);
25238 if (Op->isUndef()) {
25239 Elts.push_back(Op);
25243 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
25244 APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
25245 uint64_t ShAmt = C.getZExtValue();
25246 if (ShAmt >= SVTBits) {
25247 Elts.push_back(DAG.getUNDEF(SVT));
25250 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
25252 return DAG.getBuildVector(VT, dl, Elts);
25255 // If the target doesn't support variable shifts, use either FP conversion
25256 // or integer multiplication to avoid shifting each element individually.
25257 if (VT == MVT::v4i32) {
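// (Amt << 23) + 0x3f800000 places (127 + Amt) in the exponent field of an
// IEEE-754 single (0x3f800000 is 1.0f), so each lane of the bitcast float
// vector has the value 2^Amt and FP_TO_SINT recovers the integer scale.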
25258 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
25259 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
25260 DAG.getConstant(0x3f800000U, dl, VT));
25261 Amt = DAG.getBitcast(MVT::v4f32, Amt);
25262 return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
25265 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
25266 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
25267 SDValue Z = DAG.getConstant(0, dl, VT);
25268 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
25269 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
25270 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
25271 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
25272 if (Subtarget.hasSSE41())
25273 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
25275 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
25276 DAG.getBitcast(VT, Hi),
25277 {0, 2, 4, 6, 8, 10, 12, 14});
25283 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
25284 SelectionDAG &DAG) {
25285 MVT VT = Op.getSimpleValueType();
25287 SDValue R = Op.getOperand(0);
25288 SDValue Amt = Op.getOperand(1);
25289 unsigned EltSizeInBits = VT.getScalarSizeInBits();
25290 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25292 unsigned Opc = Op.getOpcode();
25293 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
25294 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
25296 assert(VT.isVector() && "Custom lowering only for vector shifts!");
25297 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
25299 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
25302 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
25305 if (SupportedVectorVarShift(VT, Subtarget, Opc))
25308 // XOP has 128-bit variable logical/arithmetic shifts.
25309 // +ve/-ve Amt = shift left/right.
25310 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
25311 VT == MVT::v8i16 || VT == MVT::v16i8)) {
25312 if (Opc == ISD::SRL || Opc == ISD::SRA) {
25313 SDValue Zero = DAG.getConstant(0, dl, VT);
25314 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
25316 if (Opc == ISD::SHL || Opc == ISD::SRL)
25317 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
25318 if (Opc == ISD::SRA)
25319 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
25322 // v2i64 vector logical shifts can efficiently avoid scalarization - do the
25323 // shifts per-lane and then shuffle the partial results back together.
25324 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
25325 // Splat the shift amounts so the scalar shifts above will catch it.
25326 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
25327 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
25328 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
25329 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
25330 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
25333 // i64 vector arithmetic shift can be emulated with the transform:
25334 // M = lshr(SIGN_MASK, Amt)
25335 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
25336 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
25338 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
25339 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
25340 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25341 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
25342 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
25346 // If possible, lower this shift as a sequence of two shifts by
25347 // constant plus a BLENDing shuffle instead of scalarizing it.
25349 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
25351 // Could be rewritten as:
25352 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
25354 // The advantage is that the two shifts from the example would be
25355 // lowered as X86ISD::VSRLI nodes in parallel before blending.
25356 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
25357 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25358 SDValue Amt1, Amt2;
25359 unsigned NumElts = VT.getVectorNumElements();
25360 SmallVector<int, 8> ShuffleMask;
25361 for (unsigned i = 0; i != NumElts; ++i) {
25362 SDValue A = Amt->getOperand(i);
25364 ShuffleMask.push_back(SM_SentinelUndef);
25367 if (!Amt1 || Amt1 == A) {
25368 ShuffleMask.push_back(i);
25372 if (!Amt2 || Amt2 == A) {
25373 ShuffleMask.push_back(i + NumElts);
25380 // Only perform this blend if we can perform it without loading a mask.
25381 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
25382 (VT != MVT::v16i16 ||
25383 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
25384 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
25385 canWidenShuffleElements(ShuffleMask))) {
25386 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
25387 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
25388 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
25389 Cst2->getAPIntValue().ult(EltSizeInBits)) {
25390 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25391 Cst1->getZExtValue(), DAG);
25392 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25393 Cst2->getZExtValue(), DAG);
25394 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
25399 // If possible, lower this packed shift into a vector multiply instead of
25400 // expanding it into a sequence of scalar shifts.
25401 if (Opc == ISD::SHL)
25402 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
25403 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
25405 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
25406 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
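// Concretely, mulhu(x, 1 << (NumEltBits - Amt)) == x >> Amt for Amt != 0;
// the Amt == 0 lanes (where the scale would not fit) are fixed up by the
// select against the original value below.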
25407 if (Opc == ISD::SRL && ConstantAmt &&
25408 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25409 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25410 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25411 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25412 SDValue Zero = DAG.getConstant(0, dl, VT);
25413 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
25414 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
25415 return DAG.getSelect(dl, VT, ZAmt, R, Res);
25419 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
25420 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
25421 // TODO: Special case handling for shift by 0/1, really we can afford either
25422 // of these cases in pre-SSE41/XOP/AVX512 but not both.
25423 if (Opc == ISD::SRA && ConstantAmt &&
25424 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
25425 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
25426 !Subtarget.hasAVX512()) ||
25427 DAG.isKnownNeverZero(Amt))) {
25428 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25429 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25430 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25432 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
25434 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
25436 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
25437 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
25438 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
25439 return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
25443 // v4i32 Non Uniform Shifts.
25444 // If the shift amount is constant we can shift each lane using the SSE2
25445 // immediate shifts, else we need to zero-extend each lane to the lower i64
25446 // and shift using the SSE2 variable shifts.
25447 // The separate results can then be blended together.
25448 if (VT == MVT::v4i32) {
25449 SDValue Amt0, Amt1, Amt2, Amt3;
25451 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
25452 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
25453 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
25454 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
25456 // The SSE2 shifts use the lower i64 as the same shift amount for
25457 // all lanes and the upper i64 is ignored. On AVX we're better off
25458 // just zero-extending, but for SSE just duplicating the top 16-bits is
25459 // cheaper and has the same effect for out of range values.
25460 if (Subtarget.hasAVX()) {
25461 SDValue Z = DAG.getConstant(0, dl, VT);
25462 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
25463 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
25464 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
25465 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
25467 SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
25468 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25469 {4, 5, 6, 7, -1, -1, -1, -1});
25470 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25471 {0, 1, 1, 1, -1, -1, -1, -1});
25472 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25473 {2, 3, 3, 3, -1, -1, -1, -1});
25474 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25475 {0, 1, 1, 1, -1, -1, -1, -1});
25476 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25477 {2, 3, 3, 3, -1, -1, -1, -1});
25481 unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
25482 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
25483 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
25484 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
25485 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
25487 // Merge the shifted lane results optimally with/without PBLENDW.
25488 // TODO - ideally shuffle combining would handle this.
25489 if (Subtarget.hasSSE41()) {
25490 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
25491 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
25492 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
25494 SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
25495 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
25496 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
25499 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
25500 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
25501 // make the existing SSE solution better.
25502 // NOTE: We honor preferred vector width before promoting to 512-bits.
25503 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
25504 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
25505 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
25506 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
25507 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
25508 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
25509 "Unexpected vector type");
25510 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
25511 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
25512 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
25513 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
25514 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
25515 return DAG.getNode(ISD::TRUNCATE, dl, VT,
25516 DAG.getNode(Opc, dl, ExtVT, R, Amt));
25519 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
25520 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
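// The identity used: for an 8-bit lane x zero-extended (SRL) or sign-extended
// (SRA) to i16, the top byte of x * (1 << (8 - Amt)) equals x >> Amt
// (logical or arithmetic respectively).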
25521 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
25522 (VT == MVT::v16i8 || VT == MVT::v64i8 ||
25523 (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
25524 !Subtarget.hasXOP()) {
25525 int NumElts = VT.getVectorNumElements();
25526 SDValue Cst8 = DAG.getConstant(8, dl, MVT::i8);
25528 // Extend constant shift amount to vXi16 (it doesn't matter if the type
25530 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
25531 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
25532 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
25533 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
25534 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
25535 "Constant build vector expected");
25537 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
25538 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
25539 : DAG.getZExtOrTrunc(R, dl, ExVT);
25540 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
25541 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
25542 return DAG.getZExtOrTrunc(R, dl, VT);
25545 SmallVector<SDValue, 16> LoAmt, HiAmt;
25546 for (int i = 0; i != NumElts; i += 16) {
25547 for (int j = 0; j != 8; ++j) {
25548 LoAmt.push_back(Amt.getOperand(i + j));
25549 HiAmt.push_back(Amt.getOperand(i + j + 8));
25553 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
25554 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
25555 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
25557 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
25558 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
25559 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
25560 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
25561 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
25562 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
25563 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
25564 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
25565 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
25568 if (VT == MVT::v16i8 ||
25569 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
25570 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
25571 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
25573 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
25574 if (VT.is512BitVector()) {
25575 // On AVX512BW targets we make use of the fact that VSELECT lowers
25576 // to a masked blend which selects bytes based just on the sign bit
25577 // extracted to a mask.
25578 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
25579 V0 = DAG.getBitcast(VT, V0);
25580 V1 = DAG.getBitcast(VT, V1);
25581 Sel = DAG.getBitcast(VT, Sel);
25582 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
25584 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
25585 } else if (Subtarget.hasSSE41()) {
25586 // On SSE41 targets we make use of the fact that VSELECT lowers
25587 // to PBLENDVB which selects bytes based just on the sign bit.
25588 V0 = DAG.getBitcast(VT, V0);
25589 V1 = DAG.getBitcast(VT, V1);
25590 Sel = DAG.getBitcast(VT, Sel);
25591 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
25593 // On pre-SSE41 targets we test for the sign bit by comparing to
25594 // zero - a negative value will set all bits of the lanes to true
25595 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
25596 SDValue Z = DAG.getConstant(0, dl, SelVT);
25597 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
25598 return DAG.getSelect(dl, SelVT, C, V0, V1);
25601 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
25602 // We can safely do this using i16 shifts as we're only interested in
25603 // the 3 lower bits of each byte.
25604 Amt = DAG.getBitcast(ExtVT, Amt);
25605 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
25606 Amt = DAG.getBitcast(VT, Amt);
25608 if (Opc == ISD::SHL || Opc == ISD::SRL) {
25609 // r = VSELECT(r, shift(r, 4), a);
25610 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
25611 R = SignBitSelect(VT, Amt, M, R);
25614 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25616 // r = VSELECT(r, shift(r, 2), a);
25617 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
25618 R = SignBitSelect(VT, Amt, M, R);
25621 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25623 // return VSELECT(r, shift(r, 1), a);
25624 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
25625 R = SignBitSelect(VT, Amt, M, R);
25629 if (Opc == ISD::SRA) {
25630 // For SRA we need to unpack each byte to the higher byte of an i16 vector
25631 // so we can correctly sign extend. We don't care what happens to the
25633 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
25634 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
25635 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
25636 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
25637 ALo = DAG.getBitcast(ExtVT, ALo);
25638 AHi = DAG.getBitcast(ExtVT, AHi);
25639 RLo = DAG.getBitcast(ExtVT, RLo);
25640 RHi = DAG.getBitcast(ExtVT, RHi);
25642 // r = VSELECT(r, shift(r, 4), a);
25643 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
25644 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
25645 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
25646 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
25649 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
25650 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
25652 // r = VSELECT(r, shift(r, 2), a);
25653 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
25654 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
25655 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
25656 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
25659 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
25660 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
25662 // r = VSELECT(r, shift(r, 1), a);
25663 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
25664 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
25665 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
25666 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
25668 // Logical shift the result back to the lower byte, leaving a zero upper
25669 // byte meaning that we can safely pack with PACKUSWB.
25670 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
25671 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
25672 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25676 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
25677 MVT ExtVT = MVT::v8i32;
25678 SDValue Z = DAG.getConstant(0, dl, VT);
25679 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
25680 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
25681 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
25682 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
25683 ALo = DAG.getBitcast(ExtVT, ALo);
25684 AHi = DAG.getBitcast(ExtVT, AHi);
25685 RLo = DAG.getBitcast(ExtVT, RLo);
25686 RHi = DAG.getBitcast(ExtVT, RHi);
25687 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
25688 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
25689 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
25690 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
25691 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
25694 if (VT == MVT::v8i16) {
25695 // If we have a constant shift amount, the non-SSE41 path is best as
25696 // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
25697 bool UseSSE41 = Subtarget.hasSSE41() &&
25698 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25700 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
25701 // On SSE41 targets we make use of the fact that VSELECT lowers
25702 // to PBLENDVB which selects bytes based just on the sign bit.
25704 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
25705 V0 = DAG.getBitcast(ExtVT, V0);
25706 V1 = DAG.getBitcast(ExtVT, V1);
25707 Sel = DAG.getBitcast(ExtVT, Sel);
25708 return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
25710 // On pre-SSE41 targets we splat the sign bit - a negative value will
25711 // set all bits of the lanes to true and VSELECT uses that in
25712 // its OR(AND(V0,C),AND(V1,~C)) lowering.
25714 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
25715 return DAG.getSelect(dl, VT, C, V0, V1);
25718 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
25720 // On SSE41 targets we need to replicate the shift mask in both
25721 // bytes for PBLENDVB.
25724 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
25725 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
25727 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
25730 // r = VSELECT(r, shift(r, 8), a);
25731 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
25732 R = SignBitSelect(Amt, M, R);
25735 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25737 // r = VSELECT(r, shift(r, 4), a);
25738 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
25739 R = SignBitSelect(Amt, M, R);
25742 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25744 // r = VSELECT(r, shift(r, 2), a);
25745 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
25746 R = SignBitSelect(Amt, M, R);
25749 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
25751 // return VSELECT(r, shift(r, 1), a);
25752 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
25753 R = SignBitSelect(Amt, M, R);
25757 // Decompose 256-bit shifts into 128-bit shifts.
25758 if (VT.is256BitVector())
25759 return split256IntArith(Op, DAG);
25764 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
25765 SelectionDAG &DAG) {
25766 MVT VT = Op.getSimpleValueType();
25767 assert(VT.isVector() && "Custom lowering only for vector rotates!");
25770 SDValue R = Op.getOperand(0);
25771 SDValue Amt = Op.getOperand(1);
25772 unsigned Opcode = Op.getOpcode();
25773 unsigned EltSizeInBits = VT.getScalarSizeInBits();
25774 int NumElts = VT.getVectorNumElements();
25776 // Check for constant splat rotation amount.
25778 SmallVector<APInt, 32> EltBits;
25779 int CstSplatIndex = -1;
25780 if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
25781 for (int i = 0; i != NumElts; ++i)
25782 if (!UndefElts[i]) {
25783 if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
25787 CstSplatIndex = -1;
25791 // AVX512 implicitly uses modulo rotation amounts.
25792 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
25793 // Attempt to rotate by immediate.
25794 if (0 <= CstSplatIndex) {
25795 unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
25796 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
25797 return DAG.getNode(Op, DL, VT, R,
25798 DAG.getConstant(RotateAmt, DL, MVT::i8));
25801 // Else, fall-back on VPROLV/VPRORV.
25805 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
25807 // XOP has 128-bit vector variable + immediate rotates.
25808 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
25809 // XOP implicitly uses modulo rotation amounts.
25810 if (Subtarget.hasXOP()) {
25811 if (VT.is256BitVector())
25812 return split256IntArith(Op, DAG);
25813 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
25815 // Attempt to rotate by immediate.
25816 if (0 <= CstSplatIndex) {
25817 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
25818 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
25819 DAG.getConstant(RotateAmt, DL, MVT::i8));
25822 // Use general rotate by variable (per-element).
25826 // Split 256-bit integers on pre-AVX2 targets.
25827 if (VT.is256BitVector() && !Subtarget.hasAVX2())
25828 return split256IntArith(Op, DAG);
25830 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
25831 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
25832 Subtarget.hasAVX2())) &&
25833 "Only vXi32/vXi16/vXi8 vector rotates supported");
25835 // Rotate by a uniform constant - expand back to shifts.
25836 if (0 <= CstSplatIndex)
25839 bool IsSplatAmt = DAG.isSplatValue(Amt);
25841 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
25843 if (EltSizeInBits == 8 && !IsSplatAmt) {
25844 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
25847 // We don't need ModuloAmt here as we just peek at individual bits.
25848 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25850 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
25851 if (Subtarget.hasSSE41()) {
25852 // On SSE41 targets we make use of the fact that VSELECT lowers
25853 // to PBLENDVB which selects bytes based just on the sign bit.
25854 V0 = DAG.getBitcast(VT, V0);
25855 V1 = DAG.getBitcast(VT, V1);
25856 Sel = DAG.getBitcast(VT, Sel);
25857 return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
25859 // On pre-SSE41 targets we test for the sign bit by comparing to
25860 // zero - a negative value will set all bits of the lanes to true
25861 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
25862 SDValue Z = DAG.getConstant(0, DL, SelVT);
25863 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
25864 return DAG.getSelect(DL, SelVT, C, V0, V1);
25867 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
25868 // We can safely do this using i16 shifts as we're only interested in
25869 // the 3 lower bits of each byte.
25870 Amt = DAG.getBitcast(ExtVT, Amt);
25871 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
25872 Amt = DAG.getBitcast(VT, Amt);
25874 // r = VSELECT(r, rot(r, 4), a);
25878 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
25879 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
25880 R = SignBitSelect(VT, Amt, M, R);
25883 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
25885 // r = VSELECT(r, rot(r, 2), a);
25888 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
25889 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
25890 R = SignBitSelect(VT, Amt, M, R);
25893 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
25895 // return VSELECT(r, rot(r, 1), a);
25898 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
25899 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
25900 return SignBitSelect(VT, Amt, M, R);
25903 // ISD::ROT* uses modulo rotate amounts.
25904 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
25905 DAG.getConstant(EltSizeInBits - 1, DL, VT));
25907 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25908 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
25909 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
25911 // Fallback for splats + all supported variable shifts.
25912 // Fallback for non-constant AVX2 vXi16 as well.
25913 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
25914 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
25915 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
25916 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
25917 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
25918 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
25921 // As with shifts, convert the rotation amount to a multiplication factor.
25922 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
25923 assert(Scale && "Failed to convert ROTL amount to scale");
25925 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
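// rotl(x, Amt) == (x << Amt) | (x >> (16 - Amt)): the low half of the i16
// multiply by 2^Amt is the left-shifted part and MULHU provides the bits
// that wrapped around.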
25926 if (EltSizeInBits == 16) {
25927 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
25928 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
25929 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
25932 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
25933 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
25934 // that can then be OR'd with the lower 32-bits.
25935 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
25936 static const int OddMask[] = {1, -1, 3, -1};
25937 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
25938 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
25940 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
25941 DAG.getBitcast(MVT::v2i64, R),
25942 DAG.getBitcast(MVT::v2i64, Scale));
25943 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
25944 DAG.getBitcast(MVT::v2i64, R13),
25945 DAG.getBitcast(MVT::v2i64, Scale13));
25946 Res02 = DAG.getBitcast(VT, Res02);
25947 Res13 = DAG.getBitcast(VT, Res13);
25949 return DAG.getNode(ISD::OR, DL, VT,
25950 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
25951 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
25954 /// Returns true if the operand type is exactly twice the native width, and
25955 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
25956 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
25957 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
25958 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
25959 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
25962 return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
25963 if (OpWidth == 128)
25964 return Subtarget.hasCmpxchg16b();
25969 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
25970 // TODO: In 32-bit mode, use FISTP when X87 is available?
25971 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
25972 Type *MemType = SI->getValueOperand()->getType();
25974 bool NoImplicitFloatOps =
25975 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
25976 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
25977 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
25978 return false;
25980 return needsCmpXchgNb(MemType);
25983 // Note: this turns large loads into lock cmpxchg8b/16b.
25984 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
25985 TargetLowering::AtomicExpansionKind
25986 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
25987 Type *MemType = LI->getType();
25989 // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
25990 // can use movq to do the load. If we have X87 we can load into an 80-bit
25991 // X87 register and store it to a stack temporary.
25992 bool NoImplicitFloatOps =
25993 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
25994 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
25995 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
25996 (Subtarget.hasSSE2() || Subtarget.hasX87()))
25997 return AtomicExpansionKind::None;
25999 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
26000 : AtomicExpansionKind::None;
26003 TargetLowering::AtomicExpansionKind
26004 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
26005 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26006 Type *MemType = AI->getType();
26008 // If the operand is too big, we must see if cmpxchg8/16b is available
26009 // and default to library calls otherwise.
26010 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
26011 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
26012 : AtomicExpansionKind::None;
26015 AtomicRMWInst::BinOp Op = AI->getOperation();
26016 switch (Op) {
26017 default:
26018 llvm_unreachable("Unknown atomic operation");
26019 case AtomicRMWInst::Xchg:
26020 case AtomicRMWInst::Add:
26021 case AtomicRMWInst::Sub:
26022 // It's better to use xadd, xsub or xchg for these in all cases.
26023 return AtomicExpansionKind::None;
26024 case AtomicRMWInst::Or:
26025 case AtomicRMWInst::And:
26026 case AtomicRMWInst::Xor:
26027 // If the atomicrmw's result isn't actually used, we can just add a "lock"
26028 // prefix to a normal instruction for these operations.
26029 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
26030 : AtomicExpansionKind::None;
26031 case AtomicRMWInst::Nand:
26032 case AtomicRMWInst::Max:
26033 case AtomicRMWInst::Min:
26034 case AtomicRMWInst::UMax:
26035 case AtomicRMWInst::UMin:
26036 case AtomicRMWInst::FAdd:
26037 case AtomicRMWInst::FSub:
26038 // These always require a non-trivial set of data operations on x86. We must
26039 // use a cmpxchg loop.
26040 return AtomicExpansionKind::CmpXChg;
26044 LoadInst *
26045 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
26046 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26047 Type *MemType = AI->getType();
26048 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
26049 // there is no benefit in turning such RMWs into loads, and it is actually
26050 // harmful as it introduces an mfence.
26051 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
26054 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
26055 // lowering available in lowerAtomicArith.
26056 // TODO: push more cases through this path.
26057 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
26058 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
26059 AI->use_empty())
26060 return nullptr;
26062 auto Builder = IRBuilder<>(AI);
26063 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
26064 auto SSID = AI->getSyncScopeID();
26065 // We must restrict the ordering to avoid generating loads with Release or
26066 // ReleaseAcquire orderings.
26067 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
26069 // Before the load we need a fence. Here is an example lifted from
26070 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
26073 // x.store(1, relaxed);
26074 // r1 = y.fetch_add(0, release);
26076 // y.fetch_add(42, acquire);
26077 // r2 = x.load(relaxed);
26078 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
26079 // lowered to just a load without a fence. A mfence flushes the store buffer,
26080 // making the optimization clearly correct.
26081 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
26082 // otherwise, we might be able to be more aggressive on relaxed idempotent
26083 // rmw. In practice, they do not look useful, so we don't try to be
26084 // especially clever.
26085 if (SSID == SyncScope::SingleThread)
26086 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
26087 // the IR level, so we must wrap it in an intrinsic.
26088 return nullptr;
26090 if (!Subtarget.hasMFence())
26091 // FIXME: it might make sense to use a locked operation here but on a
26092 // different cache-line to prevent cache-line bouncing. In practice it
26093 // is probably a small win, and x86 processors without mfence are rare
26094 // enough that we do not bother.
26095 return nullptr;
26097 Function *MFence =
26098 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
26099 Builder.CreateCall(MFence, {});
26101 // Finally we can emit the atomic load.
26102 LoadInst *Loaded =
26103 Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
26104 AI->getType()->getPrimitiveSizeInBits());
26105 Loaded->setAtomic(Order, SSID);
26106 AI->replaceAllUsesWith(Loaded);
26107 AI->eraseFromParent();
26108 return Loaded;
26111 /// Emit a locked operation on a stack location which does not change any
26112 /// memory location, but does involve a lock prefix. Location is chosen to be
26113 /// a) very likely accessed only by a single thread to minimize cache traffic,
26114 /// and b) definitely dereferenceable. Returns the new Chain result.
26115 static SDValue emitLockedStackOp(SelectionDAG &DAG,
26116 const X86Subtarget &Subtarget,
26117 SDValue Chain, SDLoc DL) {
26118 // Implementation notes:
26119 // 1) LOCK prefix creates a full read/write reordering barrier for memory
26120 // operations issued by the current processor. As such, the location
26121 // referenced is not relevant for the ordering properties of the instruction.
26122 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
26123 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
26124 // 2) Using an immediate operand appears to be the best encoding choice
26125 // here since it doesn't require an extra register.
26126 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
26127 // is small enough it might just be measurement noise.)
26128 // 4) When choosing offsets, there are several contributing factors:
26129 // a) If there's no redzone, we default to TOS. (We could allocate a cache
26130 // line aligned stack object to improve this case.)
26131 // b) To minimize our chances of introducing a false dependence, we prefer
26132 // to offset the stack usage from TOS slightly.
26133 // c) To minimize concerns about cross thread stack usage - in particular,
26134 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
26135 // captures state in the TOS frame and accesses it from many threads -
26136 // we want to use an offset such that the offset is in a distinct cache
26137 // line from the TOS frame.
26139 // For a general discussion of the tradeoffs and benchmark results, see:
26140 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
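// For illustration only (a typical, not guaranteed, encoding): with a 128-byte
// red zone this node usually assembles to something like
//   lock orl $0x0, -0x40(%rsp)
// and to "lock orl $0x0, (%esp)" on 32-bit targets, which have no red zone.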
26142 auto &MF = DAG.getMachineFunction();
26143 auto &TFL = *Subtarget.getFrameLowering();
26144 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
26146 if (Subtarget.is64Bit()) {
26147 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26149 DAG.getRegister(X86::RSP, MVT::i64), // Base
26150 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26151 DAG.getRegister(0, MVT::i64), // Index
26152 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26153 DAG.getRegister(0, MVT::i16), // Segment.
26156 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26158 return SDValue(Res, 1);
26161 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26163 DAG.getRegister(X86::ESP, MVT::i32), // Base
26164 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26165 DAG.getRegister(0, MVT::i32), // Index
26166 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26167 DAG.getRegister(0, MVT::i16), // Segment.
26171 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26173 return SDValue(Res, 1);
26176 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
26177 SelectionDAG &DAG) {
26179 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
26180 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
26181 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
26182 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
26184 // The only fence that needs an instruction is a sequentially-consistent
26185 // cross-thread fence.
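// For example, only "fence seq_cst" at system scope produces real code here:
// MFENCE when available, otherwise the locked stack op below. Acquire/release
// and single-thread fences become a MEMBARRIER, i.e. a codegen-only barrier.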
26186 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
26187 FenceSSID == SyncScope::System) {
26188 if (Subtarget.hasMFence())
26189 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
26191 SDValue Chain = Op.getOperand(0);
26192 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
26195 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
26196 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
26199 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
26200 SelectionDAG &DAG) {
26201 MVT T = Op.getSimpleValueType();
26205 switch(T.SimpleTy) {
26206 default: llvm_unreachable("Invalid value type!");
26207 case MVT::i8: Reg = X86::AL; size = 1; break;
26208 case MVT::i16: Reg = X86::AX; size = 2; break;
26209 case MVT::i32: Reg = X86::EAX; size = 4; break;
26210 case MVT::i64:
26211 assert(Subtarget.is64Bit() && "Node not type legal!");
26212 Reg = X86::RAX; size = 8;
26215 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
26216 Op.getOperand(2), SDValue());
26217 SDValue Ops[] = { cpIn.getValue(0),
26220 DAG.getTargetConstant(size, DL, MVT::i8),
26221 cpIn.getValue(1) };
26222 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26223 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
26224 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
26225 Ops, T, MMO);
26227 SDValue cpOut =
26228 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
26229 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
26230 MVT::i32, cpOut.getValue(2));
26231 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
26233 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26234 cpOut, Success, EFLAGS.getValue(1));
26237 // Create MOVMSKB, taking into account whether we need to split for AVX1.
26238 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
26239 const X86Subtarget &Subtarget) {
26240 MVT InVT = V.getSimpleValueType();
26242 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
26244 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
26245 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
26246 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
26247 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
26248 DAG.getConstant(16, DL, MVT::i8));
26249 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
26252 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
26255 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
26256 SelectionDAG &DAG) {
26257 SDValue Src = Op.getOperand(0);
26258 MVT SrcVT = Src.getSimpleValueType();
26259 MVT DstVT = Op.getSimpleValueType();
26261 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
26262 // half to v32i1 and concatenating the result.
26263 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
26264 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
26265 assert(Subtarget.hasBWI() && "Expected BWI target");
26267 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26268 DAG.getIntPtrConstant(0, dl));
26269 Lo = DAG.getBitcast(MVT::v32i1, Lo);
26270 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26271 DAG.getIntPtrConstant(1, dl));
26272 Hi = DAG.getBitcast(MVT::v32i1, Hi);
26273 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
26276 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
26277 if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
26278 DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
26281 std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
26282 EVT CastVT = MVT::getVectorVT(DstVT.getVectorElementType(),
26283 DstVT.getVectorNumElements() / 2);
26284 Lo = DAG.getBitcast(CastVT, Lo);
26285 Hi = DAG.getBitcast(CastVT, Hi);
26286 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
26289 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
26290 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
26291 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
26292 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
26294 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
26295 V = getPMOVMSKB(DL, V, DAG, Subtarget);
26296 return DAG.getZExtOrTrunc(V, DL, DstVT);
26299 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
26300 SrcVT == MVT::i64) {
26301 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
26302 if (DstVT != MVT::f64 && DstVT != MVT::i64 &&
26303 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
26304 // This conversion needs to be expanded.
26308 if (SrcVT.isVector()) {
26309 // Widen the input vector in the case of MVT::v2i32.
26310 // Example: from MVT::v2i32 to MVT::v4i32.
26311 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
26312 SrcVT.getVectorNumElements() * 2);
26313 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
26314 DAG.getUNDEF(SrcVT));
26316 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
26317 "Unexpected source type in LowerBITCAST");
26318 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
26321 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
26322 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
26324 if (DstVT == MVT::x86mmx)
26325 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
26327 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
26328 DAG.getIntPtrConstant(0, dl));
26331 assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
26332 Subtarget.hasMMX() && "Unexpected custom BITCAST");
26333 assert((DstVT == MVT::i64 ||
26334 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
26335 "Unexpected custom BITCAST");
26336 // i64 <=> MMX conversions are Legal.
26337 if (SrcVT==MVT::i64 && DstVT.isVector())
26339 if (DstVT==MVT::i64 && SrcVT.isVector())
26341 // MMX <=> MMX conversions are Legal.
26342 if (SrcVT.isVector() && DstVT.isVector())
26344 // All other conversions need to be expanded.
26348 /// Compute the horizontal sum of bytes in V for the elements of VT.
26350 /// Requires V to be a byte vector and VT to be an integer vector type with
26351 /// wider elements than V's type. The width of the elements of VT determines
26352 /// how many bytes of V are summed horizontally to produce each element of the
26353 /// result.
26354 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
26355 const X86Subtarget &Subtarget,
26356 SelectionDAG &DAG) {
26358 MVT ByteVecVT = V.getSimpleValueType();
26359 MVT EltVT = VT.getVectorElementType();
26360 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
26361 "Expected value to have byte element type.");
26362 assert(EltVT != MVT::i8 &&
26363 "Horizontal byte sum only makes sense for wider elements!");
26364 unsigned VecSize = VT.getSizeInBits();
26365 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
26367 // The PSADBW instruction horizontally adds all bytes and leaves the result in
26368 // i64 chunks, so it directly computes the pop count for v2i64 and v4i64.
26369 if (EltVT == MVT::i64) {
26370 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
26371 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26372 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
26373 return DAG.getBitcast(VT, V);
26376 if (EltVT == MVT::i32) {
26377 // We unpack the low half and high half into i32s interleaved with zeros so
26378 // that we can use PSADBW to horizontally sum them. The most useful part of
26379 // this is that it lines up the results of two PSADBW instructions to be
26380 // two v2i64 vectors which concatenated are the 4 population counts. We can
26381 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
26382 SDValue Zeros = DAG.getConstant(0, DL, VT);
26383 SDValue V32 = DAG.getBitcast(VT, V);
26384 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
26385 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
26387 // Do the horizontal sums into two v2i64s.
26388 Zeros = DAG.getConstant(0, DL, ByteVecVT);
26389 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26390 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26391 DAG.getBitcast(ByteVecVT, Low), Zeros);
26392 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26393 DAG.getBitcast(ByteVecVT, High), Zeros);
26395 // Merge them together.
26396 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
26397 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
26398 DAG.getBitcast(ShortVecVT, Low),
26399 DAG.getBitcast(ShortVecVT, High));
26401 return DAG.getBitcast(VT, V);
26404 // The only element type left is i16.
26405 assert(EltVT == MVT::i16 && "Unknown how to handle type");
26407 // To obtain pop count for each i16 element starting from the pop count for
26408 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
26409 // right by 8. It is important to shift as i16s because an i8 vector shift
26410 // isn't directly supported.
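// Worked example for one i16 element (illustration only): if the per-byte pop
// counts are 0x02 (high byte) and 0x03 (low byte), the element holds 0x0203.
// The i16 shift left by 8 gives 0x0300, the i8-wise add gives 0x0503, and the
// final i16 shift right by 8 leaves 0x0005 == 2 + 3.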
26411 SDValue ShifterV = DAG.getConstant(8, DL, VT);
26412 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26413 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
26414 DAG.getBitcast(ByteVecVT, V));
26415 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26418 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
26419 const X86Subtarget &Subtarget,
26420 SelectionDAG &DAG) {
26421 MVT VT = Op.getSimpleValueType();
26422 MVT EltVT = VT.getVectorElementType();
26423 int NumElts = VT.getVectorNumElements();
26425 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
26427 // Implement a lookup table in register by using an algorithm based on:
26428 // http://wm.ite.pl/articles/sse-popcount.html
26430 // The general idea is that every nibble of each byte in the input vector is an
26431 // index into an in-register pre-computed pop count table. We then split the
26432 // input vector into two new ones: (1) a vector with only the shifted-right
26433 // higher nibbles of each byte and (2) a vector with the lower nibbles (and the
26434 // higher ones masked out) of each byte. PSHUFB is used separately with both
26435 // to index the in-register table. Next, both are added and the result is an
26436 // i8 vector where each element contains the pop count for its input byte.
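// For reference only (a scalar sketch, not compiled as part of this lowering;
// PopLUT and PopCnt8 are made-up names): the same nibble-LUT algorithm on a
// single byte is
//   static const uint8_t PopLUT[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};
//   uint8_t PopCnt8(uint8_t X) { return PopLUT[X >> 4] + PopLUT[X & 0xF]; }
// PSHUFB simply performs those two table lookups for all bytes at once.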
26437 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
26438 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
26439 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
26440 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
26442 SmallVector<SDValue, 64> LUTVec;
26443 for (int i = 0; i < NumElts; ++i)
26444 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
26445 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
26446 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
26449 SDValue FourV = DAG.getConstant(4, DL, VT);
26450 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
26453 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
26455 // The input nibbles are used as the shuffle mask that indexes elements in the
26456 // LUT. After counting low and high nibbles, add the vectors to obtain the
26457 // final pop count per i8 element.
26458 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
26459 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
26460 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
26463 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
26464 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
26465 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26466 SelectionDAG &DAG) {
26467 MVT VT = Op.getSimpleValueType();
26468 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
26469 "Unknown CTPOP type to handle");
26470 SDLoc DL(Op.getNode());
26471 SDValue Op0 = Op.getOperand(0);
26473 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
26474 if (Subtarget.hasVPOPCNTDQ()) {
26475 unsigned NumElems = VT.getVectorNumElements();
26476 assert((VT.getVectorElementType() == MVT::i8 ||
26477 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
26478 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
26479 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
26480 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
26481 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
26482 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
26486 // Decompose 256-bit ops into smaller 128-bit ops.
26487 if (VT.is256BitVector() && !Subtarget.hasInt256())
26488 return Lower256IntUnary(Op, DAG);
26490 // Decompose 512-bit ops into smaller 256-bit ops.
26491 if (VT.is512BitVector() && !Subtarget.hasBWI())
26492 return Lower512IntUnary(Op, DAG);
26494 // For element types greater than i8, do vXi8 pop counts and a bytesum.
26495 if (VT.getScalarType() != MVT::i8) {
26496 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
26497 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
26498 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
26499 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
26502 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
26503 if (!Subtarget.hasSSSE3())
26506 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
26509 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26510 SelectionDAG &DAG) {
26511 assert(Op.getSimpleValueType().isVector() &&
26512 "We only do custom lowering for vector population count.");
26513 return LowerVectorCTPOP(Op, Subtarget, DAG);
26516 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
26517 MVT VT = Op.getSimpleValueType();
26518 SDValue In = Op.getOperand(0);
26521 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
26522 // perform the BITREVERSE.
26523 if (!VT.isVector()) {
26524 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
26525 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
26526 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
26527 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
26528 DAG.getIntPtrConstant(0, DL));
26531 int NumElts = VT.getVectorNumElements();
26532 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
26534 // Decompose 256-bit ops into smaller 128-bit ops.
26535 if (VT.is256BitVector())
26536 return Lower256IntUnary(Op, DAG);
26538 assert(VT.is128BitVector() &&
26539 "Only 128-bit vector bitreverse lowering supported.");
26541 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
26542 // perform the BSWAP in the shuffle.
26543 // It's best to shuffle using the second operand, as this will implicitly allow
26544 // memory folding for multiple vectors.
26545 SmallVector<SDValue, 16> MaskElts;
26546 for (int i = 0; i != NumElts; ++i) {
26547 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
26548 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
26549 int PermuteByte = SourceByte | (2 << 5);
26550 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
26554 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
26555 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
26556 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
26558 return DAG.getBitcast(VT, Res);
26561 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
26562 SelectionDAG &DAG) {
26563 MVT VT = Op.getSimpleValueType();
26565 if (Subtarget.hasXOP() && !VT.is512BitVector())
26566 return LowerBITREVERSE_XOP(Op, DAG);
26568 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
26570 SDValue In = Op.getOperand(0);
26573 unsigned NumElts = VT.getVectorNumElements();
26574 assert(VT.getScalarType() == MVT::i8 &&
26575 "Only byte vector BITREVERSE supported");
26577 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
26578 if (VT.is256BitVector() && !Subtarget.hasInt256())
26579 return Lower256IntUnary(Op, DAG);
26581 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its
26582 // two nibbles, and a PSHUFB lookup finds the bit reverse of each
26583 // 0-15 value (moved to the other nibble).
26584 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
26585 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
26586 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
26588 const int LoLUT[16] = {
26589 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
26590 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
26591 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
26592 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
26593 const int HiLUT[16] = {
26594 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
26595 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
26596 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
26597 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
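// Worked example for a single byte (illustration only): for the input byte
// 0xB4 (1011'0100), the low nibble 0x4 maps through LoLUT to 0x20 and the high
// nibble 0xB maps through HiLUT to 0x0D; OR-ing them gives 0x2D (0010'1101),
// which is 0xB4 with its bits reversed.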
26599 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
26600 for (unsigned i = 0; i < NumElts; ++i) {
26601 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
26602 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
26605 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
26606 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
26607 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
26608 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
26609 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
26612 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
26613 const X86Subtarget &Subtarget) {
26614 unsigned NewOpc = 0;
26615 switch (N->getOpcode()) {
26616 case ISD::ATOMIC_LOAD_ADD:
26617 NewOpc = X86ISD::LADD;
26618 break;
26619 case ISD::ATOMIC_LOAD_SUB:
26620 NewOpc = X86ISD::LSUB;
26621 break;
26622 case ISD::ATOMIC_LOAD_OR:
26623 NewOpc = X86ISD::LOR;
26624 break;
26625 case ISD::ATOMIC_LOAD_XOR:
26626 NewOpc = X86ISD::LXOR;
26627 break;
26628 case ISD::ATOMIC_LOAD_AND:
26629 NewOpc = X86ISD::LAND;
26630 break;
26631 default:
26632 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
26635 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
26637 return DAG.getMemIntrinsicNode(
26638 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
26639 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
26640 /*MemVT=*/N->getSimpleValueType(0), MMO);
26643 /// Lower atomic_load_ops into LOCK-prefixed operations.
26644 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
26645 const X86Subtarget &Subtarget) {
26646 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
26647 SDValue Chain = N->getOperand(0);
26648 SDValue LHS = N->getOperand(1);
26649 SDValue RHS = N->getOperand(2);
26650 unsigned Opc = N->getOpcode();
26651 MVT VT = N->getSimpleValueType(0);
26654 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
26655 // can only be lowered when the result is unused. They should have already
26656 // been transformed into a cmpxchg loop in AtomicExpand.
26657 if (N->hasAnyUseOfValue(0)) {
26658 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
26659 // select LXADD if LOCK_SUB can't be selected.
26660 if (Opc == ISD::ATOMIC_LOAD_SUB) {
26661 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
26662 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
26663 RHS, AN->getMemOperand());
26665 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
26666 "Used AtomicRMW ops other than Add should have been expanded!");
26670 // Specialized lowering for the canonical form of an idempotent atomicrmw.
26671 // The core idea here is that since the memory location isn't actually
26672 // changing, all we need is a lowering for the *ordering* impacts of the
26673 // atomicrmw. As such, we can choose a different operation and memory
26674 // location to minimize impact on other code.
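// For example (illustrative IR, not taken from a test):
//   atomicrmw or i32* %p, i32 0 seq_cst
// with an unused result does not change memory at all, so only its fence
// semantics need to survive; the code below emits a locked stack op (or a
// plain compiler barrier for weaker orderings) instead of touching %p.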
26675 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
26676 // On X86, the only ordering which actually requires an instruction is
26677 // seq_cst that isn't SingleThread; everything else just needs to be
26678 // preserved during codegen and then dropped. Note that we expect (but don't
26679 // assume) that orderings other than seq_cst and acq_rel have been
26680 // canonicalized to a store or load.
26681 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
26682 AN->getSyncScopeID() == SyncScope::System) {
26683 // Prefer a locked operation against a stack location to minimize cache
26684 // traffic. This assumes that stack locations are very likely to be
26685 // accessed only by the owning thread.
26686 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
26687 assert(!N->hasAnyUseOfValue(0));
26688 // NOTE: The getUNDEF is needed to give something for the unused result 0.
26689 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
26690 DAG.getUNDEF(VT), NewChain);
26692 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
26693 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
26694 assert(!N->hasAnyUseOfValue(0));
26695 // NOTE: The getUNDEF is needed to give something for the unused result 0.
26696 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
26697 DAG.getUNDEF(VT), NewChain);
26700 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
26701 // RAUW the chain, but don't worry about the result, as it's unused.
26702 assert(!N->hasAnyUseOfValue(0));
26703 // NOTE: The getUNDEF is needed to give something for the unused result 0.
26704 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
26705 DAG.getUNDEF(VT), LockOp.getValue(1));
26708 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
26709 const X86Subtarget &Subtarget) {
26710 auto *Node = cast<AtomicSDNode>(Op.getNode());
26712 EVT VT = Node->getMemoryVT();
26714 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
26715 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
26717 // If this store is not sequentially consistent and the type is legal
26718 // we can just keep it.
26719 if (!IsSeqCst && IsTypeLegal)
26722 if (VT == MVT::i64 && !IsTypeLegal) {
26723 // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
26724 // FIXME: Use movlps with SSE1.
26725 // FIXME: Use fist with X87.
26726 bool NoImplicitFloatOps =
26727 DAG.getMachineFunction().getFunction().hasFnAttribute(
26728 Attribute::NoImplicitFloat);
26729 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
26730 Subtarget.hasSSE2()) {
26731 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
26732 Node->getOperand(2));
26733 SDVTList Tys = DAG.getVTList(MVT::Other);
26734 SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
26735 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
26736 Ops, MVT::i64,
26737 Node->getMemOperand());
26739 // If this is a sequentially consistent store, also emit an appropriate
26740 // fence.
26741 if (IsSeqCst)
26742 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
26744 return Chain;
26748 // Convert seq_cst store -> xchg
26749 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
26750 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
26751 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
26752 Node->getMemoryVT(),
26753 Node->getOperand(0),
26754 Node->getOperand(1), Node->getOperand(2),
26755 Node->getMemOperand());
26756 return Swap.getValue(1);
26759 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
26760 SDNode *N = Op.getNode();
26761 MVT VT = N->getSimpleValueType(0);
26763 // Let legalize expand this if it isn't a legal type yet.
26764 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
26767 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
26770 // Set the carry flag.
26771 SDValue Carry = Op.getOperand(2);
26772 EVT CarryVT = Carry.getValueType();
26773 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
26774 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
26775 Carry, DAG.getConstant(NegOne, DL, CarryVT));
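// (Adding all-ones to the incoming carry value sets the hardware carry flag
// exactly when that value is nonzero: 0 + ~0 does not wrap, but 1 + ~0 does.
// The ADC/SBB emitted below then consumes that flag.)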
26777 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
26778 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
26779 Op.getOperand(1), Carry.getValue(1));
26781 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
26782 if (N->getValueType(1) == MVT::i1)
26783 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
26785 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
26788 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
26789 SelectionDAG &DAG) {
26790 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
26792 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
26793 // which returns the values as { float, float } (in XMM0) or
26794 // { double, double } (which is returned in XMM0, XMM1).
26796 SDValue Arg = Op.getOperand(0);
26797 EVT ArgVT = Arg.getValueType();
26798 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26800 TargetLowering::ArgListTy Args;
26801 TargetLowering::ArgListEntry Entry;
26805 Entry.IsSExt = false;
26806 Entry.IsZExt = false;
26807 Args.push_back(Entry);
26809 bool isF64 = ArgVT == MVT::f64;
26810 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
26811 // the small struct {f32, f32} is returned in (eax, edx). For f64,
26812 // the results are returned via SRet in memory.
26813 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26814 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
26815 const char *LibcallName = TLI.getLibcallName(LC);
26816 SDValue Callee =
26817 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
26819 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
26820 : (Type *)VectorType::get(ArgTy, 4);
26822 TargetLowering::CallLoweringInfo CLI(DAG);
26823 CLI.setDebugLoc(dl)
26824 .setChain(DAG.getEntryNode())
26825 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
26827 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
26829 if (isF64)
26830 // Returned in xmm0 and xmm1.
26831 return CallResult.first;
26833 // Returned in bits 0:31 and 32:63 of xmm0.
26834 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
26835 CallResult.first, DAG.getIntPtrConstant(0, dl));
26836 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
26837 CallResult.first, DAG.getIntPtrConstant(1, dl));
26838 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
26839 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
26842 /// Widen a vector input to a vector of NVT. The
26843 /// input vector must have the same element type as NVT.
26844 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
26845 bool FillWithZeroes = false) {
26846 // Check if InOp already has the right width.
26847 MVT InVT = InOp.getSimpleValueType();
26848 if (InVT == NVT)
26849 return InOp;
26851 if (InOp.isUndef())
26852 return DAG.getUNDEF(NVT);
26854 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
26855 "input and widen element type must match");
26857 unsigned InNumElts = InVT.getVectorNumElements();
26858 unsigned WidenNumElts = NVT.getVectorNumElements();
26859 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
26860 "Unexpected request for vector widening");
26863 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
26864 InOp.getNumOperands() == 2) {
26865 SDValue N1 = InOp.getOperand(1);
26866 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
26868 InOp = InOp.getOperand(0);
26869 InVT = InOp.getSimpleValueType();
26870 InNumElts = InVT.getVectorNumElements();
26873 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
26874 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
26875 SmallVector<SDValue, 16> Ops;
26876 for (unsigned i = 0; i < InNumElts; ++i)
26877 Ops.push_back(InOp.getOperand(i));
26879 EVT EltVT = InOp.getOperand(0).getValueType();
26881 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
26882 DAG.getUNDEF(EltVT);
26883 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
26884 Ops.push_back(FillVal);
26885 return DAG.getBuildVector(NVT, dl, Ops);
26887 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
26889 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
26890 InOp, DAG.getIntPtrConstant(0, dl));
26893 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
26894 SelectionDAG &DAG) {
26895 assert(Subtarget.hasAVX512() &&
26896 "MGATHER/MSCATTER are supported on AVX-512 arch only");
26898 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
26899 SDValue Src = N->getValue();
26900 MVT VT = Src.getSimpleValueType();
26901 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
26904 SDValue Scale = N->getScale();
26905 SDValue Index = N->getIndex();
26906 SDValue Mask = N->getMask();
26907 SDValue Chain = N->getChain();
26908 SDValue BasePtr = N->getBasePtr();
26910 if (VT == MVT::v2f32) {
26911 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
26912 // If the index is v2i64 and we have VLX we can use xmm for data and index.
26913 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
26914 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
26915 DAG.getUNDEF(MVT::v2f32));
26916 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
26917 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26918 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
26919 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
26920 return SDValue(NewScatter.getNode(), 1);
26925 if (VT == MVT::v2i32) {
26926 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
26927 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
26928 DAG.getUNDEF(MVT::v2i32));
26929 // If the index is v2i64 and we have VLX we can use xmm for data and index.
26930 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
26931 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
26932 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26933 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
26934 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
26935 return SDValue(NewScatter.getNode(), 1);
26937 // Custom widen all the operands to avoid promotion.
26938 EVT NewIndexVT = EVT::getVectorVT(
26939 *DAG.getContext(), Index.getValueType().getVectorElementType(), 4);
26940 Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
26941 DAG.getUNDEF(Index.getValueType()));
26942 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
26943 DAG.getConstant(0, dl, MVT::v2i1));
26944 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26945 return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(), dl,
26946 Ops, N->getMemOperand());
26949 MVT IndexVT = Index.getSimpleValueType();
26950 MVT MaskVT = Mask.getSimpleValueType();
26952 // If the index is v2i32, we're being called by type legalization and we
26953 // should just let the default handling take care of it.
26954 if (IndexVT == MVT::v2i32)
26957 // If we don't have VLX and neither the passthru nor the index is 512 bits, we
26958 // need to widen until one is.
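// Illustrative example (not from the source): on an AVX-512 target without
// VLX, a v8f32 scatter with a v8i32 index is widened here by Factor = 2 into a
// v16f32/v16i32 operation with a v16i1 mask whose upper 8 lanes are zero, so
// the extra lanes never store anything.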
26959 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
26960 !Index.getSimpleValueType().is512BitVector()) {
26961 // Determine how much we need to widen by to get a 512-bit type.
26962 unsigned Factor = std::min(512/VT.getSizeInBits(),
26963 512/IndexVT.getSizeInBits());
26964 unsigned NumElts = VT.getVectorNumElements() * Factor;
26966 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
26967 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
26968 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
26970 Src = ExtendToType(Src, VT, DAG);
26971 Index = ExtendToType(Index, IndexVT, DAG);
26972 Mask = ExtendToType(Mask, MaskVT, DAG, true);
26975 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
26976 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
26977 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
26978 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
26979 return SDValue(NewScatter.getNode(), 1);
26982 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
26983 SelectionDAG &DAG) {
26985 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
26986 MVT VT = Op.getSimpleValueType();
26987 MVT ScalarVT = VT.getScalarType();
26988 SDValue Mask = N->getMask();
26989 MVT MaskVT = Mask.getSimpleValueType();
26990 SDValue PassThru = N->getPassThru();
26993 // Handle AVX masked loads which don't support passthru other than 0.
26994 if (MaskVT.getVectorElementType() != MVT::i1) {
26995 // We also allow undef in the isel pattern.
26996 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
26999 SDValue NewLoad = DAG.getMaskedLoad(VT, dl, N->getChain(),
27000 N->getBasePtr(), Mask,
27001 getZeroVector(VT, Subtarget, DAG, dl),
27002 N->getMemoryVT(), N->getMemOperand(),
27003 N->getExtensionType(),
27004 N->isExpandingLoad());
27006 SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
27008 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
27011 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
27012 "Expanding masked load is supported on AVX-512 target only!");
27014 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
27015 "Expanding masked load is supported for 32 and 64-bit types only!");
27017 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27018 "Cannot lower masked load op.");
27020 assert((ScalarVT.getSizeInBits() >= 32 ||
27021 (Subtarget.hasBWI() &&
27022 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
27023 "Unsupported masked load op.");
27025 // This operation is legal for targets with VLX, but without
27026 // VLX the vector should be widened to 512 bit
27027 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
27028 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
27029 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
27031 // Mask element has to be i1.
27032 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
27033 "Unexpected mask type");
27035 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
27037 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
27038 SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
27039 N->getBasePtr(), Mask, PassThru,
27040 N->getMemoryVT(), N->getMemOperand(),
27041 N->getExtensionType(),
27042 N->isExpandingLoad());
27044 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
27045 NewLoad.getValue(0),
27046 DAG.getIntPtrConstant(0, dl));
27047 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
27048 return DAG.getMergeValues(RetOps, dl);
27051 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
27052 SelectionDAG &DAG) {
27053 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
27054 SDValue DataToStore = N->getValue();
27055 MVT VT = DataToStore.getSimpleValueType();
27056 MVT ScalarVT = VT.getScalarType();
27057 SDValue Mask = N->getMask();
27060 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
27061 "Compressing masked store is supported on AVX-512 target only!");
27063 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
27064 "Compressing masked store is supported for 32 and 64-bit types only!");
27066 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27067 "Cannot lower masked store op.");
27069 assert((ScalarVT.getSizeInBits() >= 32 ||
27070 (Subtarget.hasBWI() &&
27071 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
27072 "Unsupported masked store op.");
27074 // This operation is legal for targets with VLX, but without
27075 // VLX the vector should be widened to 512 bit
27076 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
27077 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
27079 // Mask element has to be i1.
27080 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
27081 "Unexpected mask type");
27083 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
27085 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
27086 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
27087 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
27088 Mask, N->getMemoryVT(), N->getMemOperand(),
27089 N->isTruncatingStore(), N->isCompressingStore());
27092 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
27093 SelectionDAG &DAG) {
27094 assert(Subtarget.hasAVX2() &&
27095 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
27097 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
27099 MVT VT = Op.getSimpleValueType();
27100 SDValue Index = N->getIndex();
27101 SDValue Mask = N->getMask();
27102 SDValue PassThru = N->getPassThru();
27103 MVT IndexVT = Index.getSimpleValueType();
27104 MVT MaskVT = Mask.getSimpleValueType();
27106 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
27108 // If the index is v2i32, we're being called by type legalization.
27109 if (IndexVT == MVT::v2i32)
27112 // If we don't have VLX and neither the passthru nor the index is 512 bits, we
27113 // need to widen until one is.
27114 MVT OrigVT = VT;
27115 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27116 !IndexVT.is512BitVector()) {
27117 // Determine how much we need to widen by to get a 512-bit type.
27118 unsigned Factor = std::min(512/VT.getSizeInBits(),
27119 512/IndexVT.getSizeInBits());
27121 unsigned NumElts = VT.getVectorNumElements() * Factor;
27123 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
27124 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
27125 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
27127 PassThru = ExtendToType(PassThru, VT, DAG);
27128 Index = ExtendToType(Index, IndexVT, DAG);
27129 Mask = ExtendToType(Mask, MaskVT, DAG, true);
27132 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
27133 N->getScale() };
27134 SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
27135 DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
27136 N->getMemOperand());
27137 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
27138 NewGather, DAG.getIntPtrConstant(0, dl));
27139 return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
27142 SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
27143 SelectionDAG &DAG) const {
27144 // TODO: Eventually, the lowering of these nodes should be informed by or
27145 // deferred to the GC strategy for the function in which they appear. For
27146 // now, however, they must be lowered to something. Since they are logically
27147 // no-ops in the case of a null GC strategy (or a GC strategy which does not
27148 // require special handling for these nodes), lower them as literal NOOPs for
27149 // the time being.
27150 SmallVector<SDValue, 2> Ops;
27152 Ops.push_back(Op.getOperand(0));
27153 if (Op->getGluedNode())
27154 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
27157 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
27158 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
27163 SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
27164 SelectionDAG &DAG) const {
27165 // TODO: Eventually, the lowering of these nodes should be informed by or
27166 // deferred to the GC strategy for the function in which they appear. For
27167 // now, however, they must be lowered to something. Since they are logically
27168 // no-ops in the case of a null GC strategy (or a GC strategy which does not
27169 // require special handling for these nodes), lower them as literal NOOPs for
27170 // the time being.
27171 SmallVector<SDValue, 2> Ops;
27173 Ops.push_back(Op.getOperand(0));
27174 if (Op->getGluedNode())
27175 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
27178 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
27179 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
27184 /// Provide custom lowering hooks for some operations.
27185 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
27186 switch (Op.getOpcode()) {
27187 default: llvm_unreachable("Should not custom lower this!");
27188 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
27189 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
27190 return LowerCMP_SWAP(Op, Subtarget, DAG);
27191 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
27192 case ISD::ATOMIC_LOAD_ADD:
27193 case ISD::ATOMIC_LOAD_SUB:
27194 case ISD::ATOMIC_LOAD_OR:
27195 case ISD::ATOMIC_LOAD_XOR:
27196 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
27197 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
27198 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
27199 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
27200 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
27201 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
27202 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
27203 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
27204 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
27205 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
27206 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
27207 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
27208 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
27209 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
27210 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
27211 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
27212 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
27213 case ISD::SHL_PARTS:
27214 case ISD::SRA_PARTS:
27215 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
27217 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
27218 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
27219 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
27220 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
27221 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
27222 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
27223 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
27224 case ISD::ZERO_EXTEND_VECTOR_INREG:
27225 case ISD::SIGN_EXTEND_VECTOR_INREG:
27226 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
27227 case ISD::FP_TO_SINT:
27228 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
27229 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
27230 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
27231 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
27233 case ISD::FSUB: return lowerFaddFsub(Op, DAG, Subtarget);
27235 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
27236 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
27237 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
27238 case ISD::SETCC: return LowerSETCC(Op, DAG);
27239 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
27240 case ISD::SELECT: return LowerSELECT(Op, DAG);
27241 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
27242 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
27243 case ISD::VASTART: return LowerVASTART(Op, DAG);
27244 case ISD::VAARG: return LowerVAARG(Op, DAG);
27245 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
27246 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
27247 case ISD::INTRINSIC_VOID:
27248 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
27249 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
27250 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
27251 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
27252 case ISD::FRAME_TO_ARGS_OFFSET:
27253 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
27254 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
27255 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
27256 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
27257 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
27258 case ISD::EH_SJLJ_SETUP_DISPATCH:
27259 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
27260 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
27261 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
27262 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
27264 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
27266 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
27267 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
27269 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
27271 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
27274 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
27280 case ISD::UMULO: return LowerXALUO(Op, DAG);
27281 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
27282 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
27283 case ISD::ADDCARRY:
27284 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
27286 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
27290 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
27294 case ISD::UMIN: return LowerMINMAX(Op, DAG);
27295 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
27296 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
27297 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
27298 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
27299 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
27300 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
27301 case ISD::GC_TRANSITION_START:
27302 return LowerGC_TRANSITION_START(Op, DAG);
27303 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
27307 /// Places new result values for the node in Results (their number
27308 /// and types must exactly match those of the original return values of
27309 /// the node), or leaves Results empty, which indicates that the node is not
27310 /// to be custom lowered after all.
27311 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
27312 SmallVectorImpl<SDValue> &Results,
27313 SelectionDAG &DAG) const {
27314 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
27316 if (!Res.getNode())
27319 // If the original node has one result, take the return value from
27320 // LowerOperation as is. It might not be result number 0.
27321 if (N->getNumValues() == 1) {
27322 Results.push_back(Res);
27326 // If the original node has multiple results, then the return node should
27327 // have the same number of results.
27328 assert((N->getNumValues() == Res->getNumValues()) &&
27329 "Lowering returned the wrong number of results!");
27331 // Place new result values based on N's result numbers.
27332 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
27333 Results.push_back(Res.getValue(I));
27336 /// Replace a node with an illegal result type with a new node built out of
27337 /// custom code.
27338 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
27339 SmallVectorImpl<SDValue>&Results,
27340 SelectionDAG &DAG) const {
27342 switch (N->getOpcode()) {
27343 default:
27345 dbgs() << "ReplaceNodeResults: ";
27348 llvm_unreachable("Do not know how to custom type legalize this operation!");
27349 case ISD::CTPOP: {
27350 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
27351 // Use a v2i64 if possible.
27352 bool NoImplicitFloatOps =
27353 DAG.getMachineFunction().getFunction().hasFnAttribute(
27354 Attribute::NoImplicitFloat);
27355 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
27356 SDValue Wide =
27357 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
27358 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
27359 // Bit count should fit in 32-bits, extract it as that and then zero
27360 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
27361 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
27362 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
27363 DAG.getIntPtrConstant(0, dl));
27364 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
27365 Results.push_back(Wide);
case ISD::MUL: {
EVT VT = N->getValueType(0);
27371 assert(VT.isVector() && "Unexpected VT");
27372 if (getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger &&
27373 VT.getVectorNumElements() == 2) {
27374 // Promote to a pattern that will be turned into PMULUDQ.
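// PMULUDQ multiplies only the low 32 bits of each 64-bit lane, so the
// garbage left in the upper halves by ANY_EXTEND is harmless: the
// TRUNCATE below keeps just the low 32 bits of each product.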
SDValue N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
                         N->getOperand(0));
SDValue N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
                         N->getOperand(1));
27379 SDValue Mul = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, N0, N1);
27380 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, VT, Mul));
27381 } else if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
27382 VT.getVectorElementType() == MVT::i8) {
27383 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
27384 // elements are needed.
27385 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
27386 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
27387 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
27388 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
27389 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27390 unsigned NumConcats = 16 / VT.getVectorNumElements();
27391 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
27392 ConcatOps[0] = Res;
27393 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
27394 Results.push_back(Res);
case ISD::UADDSAT:
case ISD::SADDSAT:
case ISD::USUBSAT:
case ISD::SSUBSAT:
case X86ISD::VPMADDWD:
27403 case X86ISD::AVG: {
27404 // Legalize types for ISD::UADDSAT/SADDSAT/USUBSAT/SSUBSAT and
27405 // X86ISD::AVG/VPMADDWD by widening.
27406 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27408 EVT VT = N->getValueType(0);
27409 EVT InVT = N->getOperand(0).getValueType();
27410 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
27411 "Expected a VT that divides into 128 bits.");
27412 unsigned NumConcat = 128 / InVT.getSizeInBits();
27414 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
27415 InVT.getVectorElementType(),
27416 NumConcat * InVT.getVectorNumElements());
27417 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
27418 VT.getVectorElementType(),
27419 NumConcat * VT.getVectorNumElements());
27421 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
27422 Ops[0] = N->getOperand(0);
27423 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
27424 Ops[0] = N->getOperand(1);
27425 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
27427 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
27428 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27429 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
27430 DAG.getIntPtrConstant(0, dl));
27431 Results.push_back(Res);
case ISD::ABS: {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27436 assert(N->getValueType(0) == MVT::i64 &&
27437 "Unexpected type (!= i64) on ABS.");
27438 MVT HalfT = MVT::i32;
27439 SDValue Lo, Hi, Tmp;
27440 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
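// In effect this expands the 64-bit abs using the identity
//   Sign = Src >> 63 (arithmetic);  abs(Src) = (Src + Sign) ^ Sign
// computed on i32 halves, with UADDO/ADDCARRY carrying from the low half
// into the high half.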
27442 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
27443 DAG.getConstant(0, dl, HalfT));
27444 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
27445 DAG.getConstant(1, dl, HalfT));
Tmp = DAG.getNode(
    ISD::SRA, dl, HalfT, Hi,
27448 DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
27449 TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
27450 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
27451 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
27452 SDValue(Lo.getNode(), 1));
27453 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
27454 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
27455 Results.push_back(Lo);
27456 Results.push_back(Hi);
case ISD::SETCC: {
// Widen v2i32 (setcc v2f32). This is really needed for AVX512VL when
// the setcc result type is v2i1 because type legalization will end up with
27462 // a v4i1 setcc plus an extend.
27463 assert(N->getValueType(0) == MVT::v2i32 && "Unexpected type");
27464 if (N->getOperand(0).getValueType() != MVT::v2f32 ||
27465 getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector)
27467 SDValue UNDEF = DAG.getUNDEF(MVT::v2f32);
27468 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27469 N->getOperand(0), UNDEF);
27470 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27471 N->getOperand(1), UNDEF);
SDValue Res = DAG.getNode(ISD::SETCC, dl, MVT::v4i32, LHS, RHS,
                          N->getOperand(2));
27474 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
27475 DAG.getIntPtrConstant(0, dl));
27476 Results.push_back(Res);
27479 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
case X86ISD::FMINC:
case X86ISD::FMIN:
27482 case X86ISD::FMAXC:
27483 case X86ISD::FMAX: {
27484 EVT VT = N->getValueType(0);
27485 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
27486 SDValue UNDEF = DAG.getUNDEF(VT);
27487 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27488 N->getOperand(0), UNDEF);
27489 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27490 N->getOperand(1), UNDEF);
27491 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM: {
EVT VT = N->getValueType(0);
27499 if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector) {
// If the RHS is a constant splat vector, we can widen this and let the
// division/remainder-by-constant optimization handle it.
27502 // TODO: Can we do something for non-splat?
APInt SplatVal;
if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
27505 unsigned NumConcats = 128 / VT.getSizeInBits();
27506 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
27507 Ops0[0] = N->getOperand(0);
27508 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
27509 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
27510 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
27511 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
27512 Results.push_back(Res);
27517 if (VT == MVT::v2i32) {
27518 // Legalize v2i32 div/rem by unrolling. Otherwise we promote to the
27519 // v2i64 and unroll later. But then we create i64 scalar ops which
27520 // might be slow in 64-bit mode or require a libcall in 32-bit mode.
27521 Results.push_back(DAG.UnrollVectorOp(N));
27531 case ISD::UDIVREM: {
27532 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
27533 Results.push_back(V);
27536 case ISD::TRUNCATE: {
27537 MVT VT = N->getSimpleValueType(0);
27538 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27541 // The generic legalizer will try to widen the input type to the same
27542 // number of elements as the widened result type. But this isn't always
27543 // the best thing so do some custom legalization to avoid some cases.
27544 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
27545 SDValue In = N->getOperand(0);
27546 EVT InVT = In.getValueType();
27548 unsigned InBits = InVT.getSizeInBits();
27549 if (128 % InBits == 0) {
// 128 bit and smaller inputs should avoid truncate altogether and
27551 // just use a build_vector that will become a shuffle.
27552 // TODO: Widen and use a shuffle directly?
27553 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
27554 EVT EltVT = VT.getVectorElementType();
27555 unsigned WidenNumElts = WidenVT.getVectorNumElements();
27556 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
// Use the original element count so we don't do more scalar opts than
// necessary.
27559 unsigned MinElts = VT.getVectorNumElements();
27560 for (unsigned i=0; i < MinElts; ++i) {
27561 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
27562 DAG.getIntPtrConstant(i, dl));
27563 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
27565 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
27568 // With AVX512 there are some cases that can use a target specific
27569 // truncate node to go from 256/512 to less than 128 with zeros in the
27570 // upper elements of the 128 bit result.
27571 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
// We can use VTRUNC directly for 256-bit inputs with VLX, or for any 512-bit input.
27573 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
27574 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
27577 // There's one case we can widen to 512 bits and use VTRUNC.
27578 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
27579 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
27580 DAG.getUNDEF(MVT::v4i64));
27581 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
27587 case ISD::SIGN_EXTEND_VECTOR_INREG: {
27588 if (ExperimentalVectorWideningLegalization)
27591 EVT VT = N->getValueType(0);
27592 SDValue In = N->getOperand(0);
27593 EVT InVT = In.getValueType();
27594 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
27595 (InVT == MVT::v16i16 || InVT == MVT::v32i8)) {
// Custom split this so we can extend i8/i16->i32 invec. This is better
// since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
// sra, followed by an extend from i32 to i64 using pcmpgt. By custom
// splitting we allow the sra from the extend to i32 to be shared by the
// split.
27600 EVT ExtractVT = EVT::getVectorVT(*DAG.getContext(),
27601 InVT.getVectorElementType(),
27602 InVT.getVectorNumElements() / 2);
27603 MVT ExtendVT = MVT::getVectorVT(MVT::i32,
27604 VT.getVectorNumElements());
27605 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExtractVT,
27606 In, DAG.getIntPtrConstant(0, dl));
27607 In = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, MVT::v4i32, In);
27609 // Fill a vector with sign bits for each element.
27610 SDValue Zero = DAG.getConstant(0, dl, ExtendVT);
27611 SDValue SignBits = DAG.getSetCC(dl, ExtendVT, Zero, In, ISD::SETGT);
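// Interleaving each 32-bit element with its sign word yields the 64-bit
// sign-extended value: on little-endian x86 the low dword holds the value
// and the high dword holds the all-zeros/all-ones sign word.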
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
// Create an unpackl and unpackh to interleave the sign bits then bitcast
// to v2i64.
27618 SDValue Lo = getUnpackl(DAG, dl, ExtendVT, In, SignBits);
27619 Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
27620 SDValue Hi = getUnpackh(DAG, dl, ExtendVT, In, SignBits);
27621 Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
27623 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
27624 Results.push_back(Res);
27629 case ISD::SIGN_EXTEND:
27630 case ISD::ZERO_EXTEND: {
27631 EVT VT = N->getValueType(0);
27632 SDValue In = N->getOperand(0);
27633 EVT InVT = In.getValueType();
27634 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
27635 (InVT == MVT::v4i16 || InVT == MVT::v4i8) &&
27636 getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector) {
27637 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
// Custom split this so we can extend i8/i16->i32 invec. This is better
// since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
// sra, followed by an extend from i32 to i64 using pcmpgt. By custom
// splitting we allow the sra from the extend to i32 to be shared by the
// split.
27642 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
27644 // Fill a vector with sign bits for each element.
27645 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
27646 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
// Create an unpackl and unpackh to interleave the sign bits then bitcast
// to v2i64.
SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                  {0, 4, 1, 5});
27652 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                  {2, 6, 3, 7});
27655 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
27657 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
27658 Results.push_back(Res);
27662 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
27663 if (!InVT.is128BitVector()) {
// Not a 128 bit vector, but maybe type legalization will promote
// it to 128 bits.
27666 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
27668 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
27669 if (!InVT.is128BitVector())
27672 // Promote the input to 128 bits. Type legalization will turn this into
27673 // zext_inreg/sext_inreg.
27674 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
// Perform custom splitting instead of the two stage extend we would get
// otherwise.
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
27681 assert(isTypeLegal(LoVT) && "Split VT not legal?");
27683 SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
27685 // We need to shift the input over by half the number of elements.
27686 unsigned NumElts = InVT.getVectorNumElements();
27687 unsigned HalfNumElts = NumElts / 2;
27688 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
27689 for (unsigned i = 0; i != HalfNumElts; ++i)
27690 ShufMask[i] = i + HalfNumElts;
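// e.g. for a v16i8 input the mask is <8..15, undef x 8>: the shuffle moves
// the upper half of the input into the low elements so the in-register
// extend (which only reads the low half) can produce the Hi result.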
27692 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
27693 Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
27695 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
27696 Results.push_back(Res);
27700 case ISD::FP_TO_SINT:
27701 case ISD::FP_TO_UINT: {
27702 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
27703 EVT VT = N->getValueType(0);
27704 SDValue Src = N->getOperand(0);
27705 EVT SrcVT = Src.getValueType();
27707 // Promote these manually to avoid over promotion to v2i64. Type
27708 // legalization will revisit the v2i32 operation for more cleanup.
27709 if ((VT == MVT::v2i8 || VT == MVT::v2i16) &&
27710 getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger) {
27711 // AVX512DQ provides instructions that produce a v2i64 result.
if (Subtarget.hasDQI())
  return;
27715 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v2i32, Src);
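// AssertZext/AssertSext record that the upper bits of each i32 element are
// already zero/sign bits, so the TRUNCATE below can be folded freely by
// later passes.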
Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
                                                     : ISD::AssertSext,
27718 dl, MVT::v2i32, Res,
27719 DAG.getValueType(VT.getVectorElementType()));
27720 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27721 Results.push_back(Res);
27725 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
27726 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27729 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
27730 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
27731 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
27732 VT.getVectorNumElements());
27733 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
27735 // Preserve what we know about the size of the original result. Except
27736 // when the result is v2i32 since we can't widen the assert.
27737 if (PromoteVT != MVT::v2i32)
Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
                                                     : ISD::AssertSext,
27740 dl, PromoteVT, Res,
27741 DAG.getValueType(VT.getVectorElementType()));
27743 // Truncate back to the original width.
27744 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27746 // Now widen to 128 bits.
27747 unsigned NumConcats = 128 / VT.getSizeInBits();
27748 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
27749 VT.getVectorNumElements() * NumConcats);
27750 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
27751 ConcatOps[0] = Res;
27752 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
27753 Results.push_back(Res);
27758 if (VT == MVT::v2i32) {
27759 assert((IsSigned || Subtarget.hasAVX512()) &&
27760 "Can only handle signed conversion without AVX512");
27761 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
bool Widenv2i32 =
    getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector;
27764 if (Src.getValueType() == MVT::v2f64) {
27765 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
27766 if (!IsSigned && !Subtarget.hasVLX()) {
// If v2i32 is widened, we can defer to the generic legalizer.
if (Widenv2i32)
  return;
// Custom widen by doubling to a legal vector width. Isel will
// further widen to v8f64.
27772 Opc = ISD::FP_TO_UINT;
27773 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64,
27774 Src, DAG.getUNDEF(MVT::v2f64));
27776 SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
if (!Widenv2i32)
  Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
27779 DAG.getIntPtrConstant(0, dl));
27780 Results.push_back(Res);
27783 if (SrcVT == MVT::v2f32 &&
27784 getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
27785 SDValue Idx = DAG.getIntPtrConstant(0, dl);
27786 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
27787 DAG.getUNDEF(MVT::v2f32));
27788 Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT
27789 : ISD::FP_TO_UINT, dl, MVT::v4i32, Res);
27790 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
27791 Results.push_back(Res);
27795 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
27796 // so early out here.
27800 if (Subtarget.hasDQI() && VT == MVT::i64 &&
27801 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
27802 assert(!Subtarget.is64Bit() && "i64 should be legal");
27803 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
27804 // Using a 256-bit input here to guarantee 128-bit input for f32 case.
27805 // TODO: Use 128-bit vectors for f64 case?
27806 // TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
27807 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
27808 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);
27810 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
27811 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
DAG.getConstantFP(0.0, dl, VecInVT), Src, ZeroIdx);
27814 Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
27815 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
27816 Results.push_back(Res);
27820 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
27821 Results.push_back(V);
27824 case ISD::SINT_TO_FP: {
27825 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
27826 SDValue Src = N->getOperand(0);
27827 if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
27829 Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
27832 case ISD::UINT_TO_FP: {
27833 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27834 EVT VT = N->getValueType(0);
27835 if (VT != MVT::v2f32)
27837 SDValue Src = N->getOperand(0);
27838 EVT SrcVT = Src.getValueType();
27839 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
27840 Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
27843 if (SrcVT != MVT::v2i32)
27845 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
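// Standard u32 -> f64 bias trick: OR the zero-extended value into the
// mantissa of the constant 2^52 (bit pattern 0x4330000000000000), giving
// exactly 2^52 + x, then subtract 2^52. The FSUB is exact, leaving x as a
// double, which is finally narrowed to f32 via VFPROUND.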
SDValue VBias =
    DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
27848 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
27849 DAG.getBitcast(MVT::v2i64, VBias));
27850 Or = DAG.getBitcast(MVT::v2f64, Or);
27851 // TODO: Are there any fast-math-flags to propagate here?
27852 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
27853 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
27856 case ISD::FP_ROUND: {
27857 if (!isTypeLegal(N->getOperand(0).getValueType()))
27859 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
27860 Results.push_back(V);
27863 case ISD::FP_EXTEND: {
27864 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
27865 // No other ValueType for FP_EXTEND should reach this point.
27866 assert(N->getValueType(0) == MVT::v2f32 &&
27867 "Do not know how to legalize this Node");
27870 case ISD::INTRINSIC_W_CHAIN: {
27871 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
default : llvm_unreachable("Do not know how to custom type "
27874 "legalize this intrinsic operation!");
27875 case Intrinsic::x86_rdtsc:
return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
                               Results);
27878 case Intrinsic::x86_rdtscp:
return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
                               Results);
27881 case Intrinsic::x86_rdpmc:
expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
                            Results);
return;
27885 case Intrinsic::x86_xgetbv:
expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
                            Results);
return;
27891 case ISD::READCYCLECOUNTER: {
27892 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
27894 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
27895 EVT T = N->getValueType(0);
27896 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
27897 bool Regs64bit = T == MVT::i128;
27898 assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
27899 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
27900 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
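// CMPXCHG8B/CMPXCHG16B compare EDX:EAX (resp. RDX:RAX) with the memory
// operand and, on a match, store ECX:EBX (resp. RCX:RBX); ZF reports
// success. The halves of the expected and new values are therefore copied
// into those fixed registers below.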
27901 SDValue cpInL, cpInH;
27902 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
27903 DAG.getConstant(0, dl, HalfT));
27904 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
27905 DAG.getConstant(1, dl, HalfT));
27906 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
27907 Regs64bit ? X86::RAX : X86::EAX,
27909 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
27910 Regs64bit ? X86::RDX : X86::EDX,
27911 cpInH, cpInL.getValue(1));
27912 SDValue swapInL, swapInH;
27913 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
27914 DAG.getConstant(0, dl, HalfT));
27915 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
27916 DAG.getConstant(1, dl, HalfT));
swapInH =
    DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
27919 swapInH, cpInH.getValue(1));
27920 // If the current function needs the base pointer, RBX,
27921 // we shouldn't use cmpxchg directly.
// The lowering of that instruction will clobber RBX, and since RBX is a
// reserved register, the register allocator will not make sure its value
// is properly saved and restored around this live-range.
27926 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
SDValue Result;
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
27929 unsigned BasePtr = TRI->getBaseRegister();
27930 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
27931 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
27932 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
27933 // ISel prefers the LCMPXCHG64 variant.
27934 // If that assert breaks, that means it is not the case anymore,
27935 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
27936 // not just EBX. This is a matter of accepting i64 input for that
// pseudo, and restoring into the register of the right width
// in the expand pseudo. Everything else should just work.
27939 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
27940 "Saving only half of the RBX");
27941 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
27942 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
27943 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
27944 Regs64bit ? X86::RBX : X86::EBX,
27945 HalfT, swapInH.getValue(1));
27946 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
27948 /*Glue*/ RBXSave.getValue(2)};
27949 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
} else {
unsigned Opcode =
    Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
27953 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
27954 Regs64bit ? X86::RBX : X86::EBX, swapInL,
27955 swapInH.getValue(1));
27956 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
27957 swapInL.getValue(1)};
27958 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
}
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
27961 Regs64bit ? X86::RAX : X86::EAX,
27962 HalfT, Result.getValue(1));
27963 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
27964 Regs64bit ? X86::RDX : X86::EDX,
27965 HalfT, cpOutL.getValue(2));
27966 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
27968 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
27969 MVT::i32, cpOutH.getValue(2));
27970 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
27971 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
27973 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
27974 Results.push_back(Success);
27975 Results.push_back(EFLAGS.getValue(1));
27978 case ISD::ATOMIC_LOAD: {
27979 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
27980 bool NoImplicitFloatOps =
27981 DAG.getMachineFunction().getFunction().hasFnAttribute(
27982 Attribute::NoImplicitFloat);
27983 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
27984 auto *Node = cast<AtomicSDNode>(N);
27985 if (Subtarget.hasSSE2()) {
// Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
// lower 64 bits.
27988 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
27989 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
27990 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
27991 MVT::i64, Node->getMemOperand());
27992 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
27993 DAG.getIntPtrConstant(0, dl));
27994 Results.push_back(Res);
27995 Results.push_back(Ld.getValue(1));
27998 if (Subtarget.hasX87()) {
27999 // First load this into an 80-bit X87 register. This will put the whole
28000 // integer into the significand.
28001 // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
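// The x87 extended-precision format has a 64-bit significand, so FILD can
// hold the full i64 exactly; that 64-bit FPU load is the one access here
// that needs to be atomic.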
28002 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
28003 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
28004 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
28005 dl, Tys, Ops, MVT::i64,
28006 Node->getMemOperand());
28007 SDValue Chain = Result.getValue(1);
28008 SDValue InFlag = Result.getValue(2);
28010 // Now store the X87 register to a stack temporary and convert to i64.
28011 // This store is not atomic and doesn't need to be.
28012 // FIXME: We don't need a stack temporary if the result of the load
28013 // is already being stored. We could just directly store there.
28014 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
28015 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28016 MachinePointerInfo MPI =
28017 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28018 SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
28019 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
28020 DAG.getVTList(MVT::Other), StoreOps,
28021 MVT::i64, MPI, 0 /*Align*/,
28022 MachineMemOperand::MOStore);
28024 // Finally load the value back from the stack temporary and return it.
28025 // This load is not atomic and doesn't need to be.
28026 // This load will be further type legalized.
28027 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
28028 Results.push_back(Result);
28029 Results.push_back(Result.getValue(1));
28033 // TODO: Use MOVLPS when SSE1 is available?
28034 // Delegate to generic TypeLegalization. Situations we can really handle
28035 // should have already been dealt with by AtomicExpandPass.cpp.
28038 case ISD::ATOMIC_SWAP:
28039 case ISD::ATOMIC_LOAD_ADD:
28040 case ISD::ATOMIC_LOAD_SUB:
28041 case ISD::ATOMIC_LOAD_AND:
28042 case ISD::ATOMIC_LOAD_OR:
28043 case ISD::ATOMIC_LOAD_XOR:
28044 case ISD::ATOMIC_LOAD_NAND:
28045 case ISD::ATOMIC_LOAD_MIN:
28046 case ISD::ATOMIC_LOAD_MAX:
28047 case ISD::ATOMIC_LOAD_UMIN:
28048 case ISD::ATOMIC_LOAD_UMAX:
28049 // Delegate to generic TypeLegalization. Situations we can really handle
28050 // should have already been dealt with by AtomicExpandPass.cpp.
28053 case ISD::BITCAST: {
28054 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28055 EVT DstVT = N->getValueType(0);
28056 EVT SrcVT = N->getOperand(0).getValueType();
28058 // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target
28059 // we can split using the k-register rather than memory.
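// Each v32i1 half then bitcasts to an i32 (in practice a KMOVD from the
// mask register), so no round-trip through memory is needed.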
28060 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
28061 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
28064 Lo = DAG.getBitcast(MVT::i32, Lo);
28065 Hi = DAG.getBitcast(MVT::i32, Hi);
28066 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
28067 Results.push_back(Res);
28071 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
28072 if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
28073 SrcVT.isVector() && isTypeLegal(SrcVT)) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
28076 MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
28077 Lo = DAG.getBitcast(CastVT, Lo);
28078 Hi = DAG.getBitcast(CastVT, Hi);
28079 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
28080 Results.push_back(Res);
28084 if (SrcVT != MVT::f64 ||
28085 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8) ||
28086 getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector)
28089 unsigned NumElts = DstVT.getVectorNumElements();
28090 EVT SVT = DstVT.getVectorElementType();
28091 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue Res;
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, N->getOperand(0));
28094 Res = DAG.getBitcast(WiderVT, Res);
28095 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res,
28096 DAG.getIntPtrConstant(0, dl));
28097 Results.push_back(Res);
28100 case ISD::MGATHER: {
28101 EVT VT = N->getValueType(0);
28102 if (VT == MVT::v2f32 && (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
28103 auto *Gather = cast<MaskedGatherSDNode>(N);
28104 SDValue Index = Gather->getIndex();
28105 if (Index.getValueType() != MVT::v2i64)
28107 SDValue Mask = Gather->getMask();
28108 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28109 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28110 Gather->getPassThru(),
28111 DAG.getUNDEF(MVT::v2f32));
28112 if (!Subtarget.hasVLX()) {
28113 // We need to widen the mask, but the instruction will only use 2
28114 // of its elements. So we can use undef.
28115 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28116 DAG.getUNDEF(MVT::v2i1));
28117 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
28119 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28120 Gather->getBasePtr(), Index, Gather->getScale() };
28121 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28122 DAG.getVTList(MVT::v4f32, Mask.getValueType(), MVT::Other), Ops, dl,
28123 Gather->getMemoryVT(), Gather->getMemOperand());
28124 Results.push_back(Res);
28125 Results.push_back(Res.getValue(2));
28128 if (VT == MVT::v2i32) {
28129 auto *Gather = cast<MaskedGatherSDNode>(N);
28130 SDValue Index = Gather->getIndex();
28131 SDValue Mask = Gather->getMask();
28132 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28133 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32,
28134 Gather->getPassThru(),
28135 DAG.getUNDEF(MVT::v2i32));
28136 // If the index is v2i64 we can use it directly.
28137 if (Index.getValueType() == MVT::v2i64 &&
28138 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
28139 if (!Subtarget.hasVLX()) {
28140 // We need to widen the mask, but the instruction will only use 2
28141 // of its elements. So we can use undef.
28142 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28143 DAG.getUNDEF(MVT::v2i1));
28144 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
28146 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28147 Gather->getBasePtr(), Index, Gather->getScale() };
28148 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28149 DAG.getVTList(MVT::v4i32, Mask.getValueType(), MVT::Other), Ops, dl,
28150 Gather->getMemoryVT(), Gather->getMemOperand());
28151 SDValue Chain = Res.getValue(2);
28152 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28153 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
28154 DAG.getIntPtrConstant(0, dl));
28155 Results.push_back(Res);
28156 Results.push_back(Chain);
28159 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
28160 EVT IndexVT = Index.getValueType();
28161 EVT NewIndexVT = EVT::getVectorVT(*DAG.getContext(),
28162 IndexVT.getScalarType(), 4);
28163 // Otherwise we need to custom widen everything to avoid promotion.
28164 Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
28165 DAG.getUNDEF(IndexVT));
28166 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28167 DAG.getConstant(0, dl, MVT::v2i1));
28168 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28169 Gather->getBasePtr(), Index, Gather->getScale() };
28170 SDValue Res = DAG.getMaskedGather(DAG.getVTList(MVT::v4i32, MVT::Other),
28171 Gather->getMemoryVT(), dl, Ops,
28172 Gather->getMemOperand());
28173 SDValue Chain = Res.getValue(1);
28174 if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector)
28175 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
28176 DAG.getIntPtrConstant(0, dl));
28177 Results.push_back(Res);
28178 Results.push_back(Chain);
case ISD::LOAD: {
// Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
// avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
28187 // cast since type legalization will try to use an i64 load.
28188 MVT VT = N->getSimpleValueType(0);
28189 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
28190 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28192 if (!ISD::isNON_EXTLoad(N))
28194 auto *Ld = cast<LoadSDNode>(N);
28195 if (Subtarget.hasSSE2()) {
28196 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
28197 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
28198 Ld->getPointerInfo(), Ld->getAlignment(),
28199 Ld->getMemOperand()->getFlags());
28200 SDValue Chain = Res.getValue(1);
28201 MVT WideVT = MVT::getVectorVT(LdVT, 2);
28202 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, WideVT, Res);
28203 MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(),
28204 VT.getVectorNumElements() * 2);
28205 Res = DAG.getBitcast(CastVT, Res);
28206 Results.push_back(Res);
28207 Results.push_back(Chain);
28210 assert(Subtarget.hasSSE1() && "Expected SSE");
28211 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
28212 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
28213 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
28214 MVT::i64, Ld->getMemOperand());
28215 Results.push_back(Res);
28216 Results.push_back(Res.getValue(1));
28222 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
28223 switch ((X86ISD::NodeType)Opcode) {
28224 case X86ISD::FIRST_NUMBER: break;
28225 case X86ISD::BSF: return "X86ISD::BSF";
28226 case X86ISD::BSR: return "X86ISD::BSR";
28227 case X86ISD::SHLD: return "X86ISD::SHLD";
28228 case X86ISD::SHRD: return "X86ISD::SHRD";
28229 case X86ISD::FAND: return "X86ISD::FAND";
28230 case X86ISD::FANDN: return "X86ISD::FANDN";
28231 case X86ISD::FOR: return "X86ISD::FOR";
28232 case X86ISD::FXOR: return "X86ISD::FXOR";
28233 case X86ISD::FILD: return "X86ISD::FILD";
28234 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
28235 case X86ISD::FIST: return "X86ISD::FIST";
28236 case X86ISD::FP_TO_INT_IN_MEM: return "X86ISD::FP_TO_INT_IN_MEM";
28237 case X86ISD::FLD: return "X86ISD::FLD";
28238 case X86ISD::FST: return "X86ISD::FST";
28239 case X86ISD::CALL: return "X86ISD::CALL";
28240 case X86ISD::BT: return "X86ISD::BT";
28241 case X86ISD::CMP: return "X86ISD::CMP";
28242 case X86ISD::COMI: return "X86ISD::COMI";
28243 case X86ISD::UCOMI: return "X86ISD::UCOMI";
28244 case X86ISD::CMPM: return "X86ISD::CMPM";
28245 case X86ISD::CMPM_SAE: return "X86ISD::CMPM_SAE";
28246 case X86ISD::SETCC: return "X86ISD::SETCC";
28247 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
28248 case X86ISD::FSETCC: return "X86ISD::FSETCC";
28249 case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
28250 case X86ISD::FSETCCM_SAE: return "X86ISD::FSETCCM_SAE";
28251 case X86ISD::CMOV: return "X86ISD::CMOV";
28252 case X86ISD::BRCOND: return "X86ISD::BRCOND";
28253 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
28254 case X86ISD::IRET: return "X86ISD::IRET";
28255 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
28256 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
28257 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
28258 case X86ISD::Wrapper: return "X86ISD::Wrapper";
28259 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
28260 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
28261 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
28262 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
28263 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
28264 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
28265 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
28266 case X86ISD::PINSRB: return "X86ISD::PINSRB";
28267 case X86ISD::PINSRW: return "X86ISD::PINSRW";
28268 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
28269 case X86ISD::ANDNP: return "X86ISD::ANDNP";
28270 case X86ISD::BLENDI: return "X86ISD::BLENDI";
28271 case X86ISD::BLENDV: return "X86ISD::BLENDV";
28272 case X86ISD::HADD: return "X86ISD::HADD";
28273 case X86ISD::HSUB: return "X86ISD::HSUB";
28274 case X86ISD::FHADD: return "X86ISD::FHADD";
28275 case X86ISD::FHSUB: return "X86ISD::FHSUB";
28276 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
28277 case X86ISD::FMAX: return "X86ISD::FMAX";
28278 case X86ISD::FMAXS: return "X86ISD::FMAXS";
28279 case X86ISD::FMAX_SAE: return "X86ISD::FMAX_SAE";
28280 case X86ISD::FMAXS_SAE: return "X86ISD::FMAXS_SAE";
28281 case X86ISD::FMIN: return "X86ISD::FMIN";
28282 case X86ISD::FMINS: return "X86ISD::FMINS";
28283 case X86ISD::FMIN_SAE: return "X86ISD::FMIN_SAE";
28284 case X86ISD::FMINS_SAE: return "X86ISD::FMINS_SAE";
28285 case X86ISD::FMAXC: return "X86ISD::FMAXC";
28286 case X86ISD::FMINC: return "X86ISD::FMINC";
28287 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
28288 case X86ISD::FRCP: return "X86ISD::FRCP";
28289 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
28290 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
28291 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
28292 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
28293 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
28294 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
28295 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
28296 case X86ISD::EH_SJLJ_SETUP_DISPATCH:
28297 return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
28298 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
28299 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
28300 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
28301 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
28302 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
28303 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
28304 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
28305 case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
28306 return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
28307 case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
28308 return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
28309 case X86ISD::LADD: return "X86ISD::LADD";
28310 case X86ISD::LSUB: return "X86ISD::LSUB";
28311 case X86ISD::LOR: return "X86ISD::LOR";
28312 case X86ISD::LXOR: return "X86ISD::LXOR";
28313 case X86ISD::LAND: return "X86ISD::LAND";
28314 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
28315 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
28316 case X86ISD::VEXTRACT_STORE: return "X86ISD::VEXTRACT_STORE";
28317 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
28318 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
28319 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
28320 case X86ISD::VMTRUNC: return "X86ISD::VMTRUNC";
28321 case X86ISD::VMTRUNCS: return "X86ISD::VMTRUNCS";
28322 case X86ISD::VMTRUNCUS: return "X86ISD::VMTRUNCUS";
28323 case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
28324 case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
28325 case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
28326 case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
28327 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
28328 case X86ISD::VFPEXT_SAE: return "X86ISD::VFPEXT_SAE";
28329 case X86ISD::VFPEXTS: return "X86ISD::VFPEXTS";
28330 case X86ISD::VFPEXTS_SAE: return "X86ISD::VFPEXTS_SAE";
28331 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
28332 case X86ISD::VMFPROUND: return "X86ISD::VMFPROUND";
28333 case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
28334 case X86ISD::VFPROUNDS: return "X86ISD::VFPROUNDS";
28335 case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
28336 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
28337 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
28338 case X86ISD::VSHL: return "X86ISD::VSHL";
28339 case X86ISD::VSRL: return "X86ISD::VSRL";
28340 case X86ISD::VSRA: return "X86ISD::VSRA";
28341 case X86ISD::VSHLI: return "X86ISD::VSHLI";
28342 case X86ISD::VSRLI: return "X86ISD::VSRLI";
28343 case X86ISD::VSRAI: return "X86ISD::VSRAI";
28344 case X86ISD::VSHLV: return "X86ISD::VSHLV";
28345 case X86ISD::VSRLV: return "X86ISD::VSRLV";
28346 case X86ISD::VSRAV: return "X86ISD::VSRAV";
28347 case X86ISD::VROTLI: return "X86ISD::VROTLI";
28348 case X86ISD::VROTRI: return "X86ISD::VROTRI";
28349 case X86ISD::VPPERM: return "X86ISD::VPPERM";
28350 case X86ISD::CMPP: return "X86ISD::CMPP";
28351 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
28352 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
28353 case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
28354 case X86ISD::ADD: return "X86ISD::ADD";
28355 case X86ISD::SUB: return "X86ISD::SUB";
28356 case X86ISD::ADC: return "X86ISD::ADC";
28357 case X86ISD::SBB: return "X86ISD::SBB";
28358 case X86ISD::SMUL: return "X86ISD::SMUL";
28359 case X86ISD::UMUL: return "X86ISD::UMUL";
28360 case X86ISD::OR: return "X86ISD::OR";
28361 case X86ISD::XOR: return "X86ISD::XOR";
28362 case X86ISD::AND: return "X86ISD::AND";
28363 case X86ISD::BEXTR: return "X86ISD::BEXTR";
28364 case X86ISD::BZHI: return "X86ISD::BZHI";
28365 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
28366 case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
28367 case X86ISD::PTEST: return "X86ISD::PTEST";
28368 case X86ISD::TESTP: return "X86ISD::TESTP";
28369 case X86ISD::KORTEST: return "X86ISD::KORTEST";
28370 case X86ISD::KTEST: return "X86ISD::KTEST";
28371 case X86ISD::KADD: return "X86ISD::KADD";
28372 case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
28373 case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
28374 case X86ISD::PACKSS: return "X86ISD::PACKSS";
28375 case X86ISD::PACKUS: return "X86ISD::PACKUS";
28376 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
28377 case X86ISD::VALIGN: return "X86ISD::VALIGN";
28378 case X86ISD::VSHLD: return "X86ISD::VSHLD";
28379 case X86ISD::VSHRD: return "X86ISD::VSHRD";
28380 case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
28381 case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
28382 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
28383 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
28384 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
28385 case X86ISD::SHUFP: return "X86ISD::SHUFP";
28386 case X86ISD::SHUF128: return "X86ISD::SHUF128";
28387 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
28388 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
28389 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
28390 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
28391 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
28392 case X86ISD::MOVSD: return "X86ISD::MOVSD";
28393 case X86ISD::MOVSS: return "X86ISD::MOVSS";
28394 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
28395 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
28396 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
28397 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
28398 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
28399 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
28400 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
28401 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
28402 case X86ISD::VPERMV: return "X86ISD::VPERMV";
28403 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
28404 case X86ISD::VPERMI: return "X86ISD::VPERMI";
28405 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
28406 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
28407 case X86ISD::VFIXUPIMM_SAE: return "X86ISD::VFIXUPIMM_SAE";
28408 case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
28409 case X86ISD::VFIXUPIMMS_SAE: return "X86ISD::VFIXUPIMMS_SAE";
28410 case X86ISD::VRANGE: return "X86ISD::VRANGE";
28411 case X86ISD::VRANGE_SAE: return "X86ISD::VRANGE_SAE";
28412 case X86ISD::VRANGES: return "X86ISD::VRANGES";
28413 case X86ISD::VRANGES_SAE: return "X86ISD::VRANGES_SAE";
28414 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
28415 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
28416 case X86ISD::PSADBW: return "X86ISD::PSADBW";
28417 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
28418 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
28419 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
28420 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
28421 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
28422 case X86ISD::MFENCE: return "X86ISD::MFENCE";
28423 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
28424 case X86ISD::SAHF: return "X86ISD::SAHF";
28425 case X86ISD::RDRAND: return "X86ISD::RDRAND";
28426 case X86ISD::RDSEED: return "X86ISD::RDSEED";
28427 case X86ISD::RDPKRU: return "X86ISD::RDPKRU";
28428 case X86ISD::WRPKRU: return "X86ISD::WRPKRU";
28429 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
28430 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
28431 case X86ISD::VPSHA: return "X86ISD::VPSHA";
28432 case X86ISD::VPSHL: return "X86ISD::VPSHL";
28433 case X86ISD::VPCOM: return "X86ISD::VPCOM";
28434 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
28435 case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
28436 case X86ISD::FMSUB: return "X86ISD::FMSUB";
28437 case X86ISD::FNMADD: return "X86ISD::FNMADD";
28438 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
28439 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
28440 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
28441 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
28442 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
28443 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
28444 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
28445 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
28446 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
28447 case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
28448 case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
28449 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
28450 case X86ISD::VRNDSCALE_SAE: return "X86ISD::VRNDSCALE_SAE";
28451 case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
28452 case X86ISD::VRNDSCALES_SAE: return "X86ISD::VRNDSCALES_SAE";
28453 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
28454 case X86ISD::VREDUCE_SAE: return "X86ISD::VREDUCE_SAE";
28455 case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
28456 case X86ISD::VREDUCES_SAE: return "X86ISD::VREDUCES_SAE";
28457 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
28458 case X86ISD::VGETMANT_SAE: return "X86ISD::VGETMANT_SAE";
28459 case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
28460 case X86ISD::VGETMANTS_SAE: return "X86ISD::VGETMANTS_SAE";
28461 case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
28462 case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
28463 case X86ISD::XTEST: return "X86ISD::XTEST";
28464 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
28465 case X86ISD::EXPAND: return "X86ISD::EXPAND";
28466 case X86ISD::SELECTS: return "X86ISD::SELECTS";
28467 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
28468 case X86ISD::RCP14: return "X86ISD::RCP14";
28469 case X86ISD::RCP14S: return "X86ISD::RCP14S";
28470 case X86ISD::RCP28: return "X86ISD::RCP28";
28471 case X86ISD::RCP28_SAE: return "X86ISD::RCP28_SAE";
28472 case X86ISD::RCP28S: return "X86ISD::RCP28S";
28473 case X86ISD::RCP28S_SAE: return "X86ISD::RCP28S_SAE";
28474 case X86ISD::EXP2: return "X86ISD::EXP2";
28475 case X86ISD::EXP2_SAE: return "X86ISD::EXP2_SAE";
28476 case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
28477 case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
28478 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
28479 case X86ISD::RSQRT28_SAE: return "X86ISD::RSQRT28_SAE";
28480 case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
28481 case X86ISD::RSQRT28S_SAE: return "X86ISD::RSQRT28S_SAE";
28482 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
28483 case X86ISD::FADDS: return "X86ISD::FADDS";
28484 case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
28485 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
28486 case X86ISD::FSUBS: return "X86ISD::FSUBS";
28487 case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
28488 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
28489 case X86ISD::FMULS: return "X86ISD::FMULS";
28490 case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
28491 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
28492 case X86ISD::FDIVS: return "X86ISD::FDIVS";
28493 case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
28494 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
28495 case X86ISD::FSQRTS: return "X86ISD::FSQRTS";
28496 case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
28497 case X86ISD::FGETEXP: return "X86ISD::FGETEXP";
28498 case X86ISD::FGETEXP_SAE: return "X86ISD::FGETEXP_SAE";
28499 case X86ISD::FGETEXPS: return "X86ISD::FGETEXPS";
28500 case X86ISD::FGETEXPS_SAE: return "X86ISD::FGETEXPS_SAE";
28501 case X86ISD::SCALEF: return "X86ISD::SCALEF";
28502 case X86ISD::SCALEF_RND: return "X86ISD::SCALEF_RND";
28503 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
28504 case X86ISD::SCALEFS_RND: return "X86ISD::SCALEFS_RND";
28505 case X86ISD::AVG: return "X86ISD::AVG";
28506 case X86ISD::MULHRS: return "X86ISD::MULHRS";
28507 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
28508 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
28509 case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
28510 case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
28511 case X86ISD::MCVTTP2SI: return "X86ISD::MCVTTP2SI";
28512 case X86ISD::MCVTTP2UI: return "X86ISD::MCVTTP2UI";
28513 case X86ISD::CVTTP2SI_SAE: return "X86ISD::CVTTP2SI_SAE";
28514 case X86ISD::CVTTP2UI_SAE: return "X86ISD::CVTTP2UI_SAE";
28515 case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI";
28516 case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI";
28517 case X86ISD::CVTTS2SI_SAE: return "X86ISD::CVTTS2SI_SAE";
28518 case X86ISD::CVTTS2UI_SAE: return "X86ISD::CVTTS2UI_SAE";
28519 case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
28520 case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
28521 case X86ISD::MCVTSI2P: return "X86ISD::MCVTSI2P";
28522 case X86ISD::MCVTUI2P: return "X86ISD::MCVTUI2P";
28523 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
28524 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
28525 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
28526 case X86ISD::SCALAR_SINT_TO_FP: return "X86ISD::SCALAR_SINT_TO_FP";
28527 case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
28528 case X86ISD::SCALAR_UINT_TO_FP: return "X86ISD::SCALAR_UINT_TO_FP";
28529 case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
28530 case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
28531 case X86ISD::MCVTPS2PH: return "X86ISD::MCVTPS2PH";
28532 case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
28533 case X86ISD::CVTPH2PS_SAE: return "X86ISD::CVTPH2PS_SAE";
28534 case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
28535 case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
28536 case X86ISD::MCVTP2SI: return "X86ISD::MCVTP2SI";
28537 case X86ISD::MCVTP2UI: return "X86ISD::MCVTP2UI";
28538 case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
28539 case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
28540 case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI";
28541 case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI";
28542 case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
28543 case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
28544 case X86ISD::CVTNE2PS2BF16: return "X86ISD::CVTNE2PS2BF16";
28545 case X86ISD::CVTNEPS2BF16: return "X86ISD::CVTNEPS2BF16";
28546 case X86ISD::MCVTNEPS2BF16: return "X86ISD::MCVTNEPS2BF16";
28547 case X86ISD::DPBF16PS: return "X86ISD::DPBF16PS";
28548 case X86ISD::LWPINS: return "X86ISD::LWPINS";
28549 case X86ISD::MGATHER: return "X86ISD::MGATHER";
28550 case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
28551 case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
28552 case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
28553 case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
28554 case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
28555 case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
28556 case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
28557 case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
28558 case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
28559 case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
28560 case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
28561 case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
28562 case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
case X86ISD::ENQCMD: return "X86ISD::ENQCMD";
case X86ISD::ENQCMDS: return "X86ISD::ENQCMDS";
28565 case X86ISD::VP2INTERSECT: return "X86ISD::VP2INTERSECT";
28570 /// Return true if the addressing mode represented by AM is legal for this
28571 /// target, for a load/store of the specified type.
28572 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
28573 const AddrMode &AM, Type *Ty,
28575 Instruction *I) const {
28576 // X86 supports extremely general addressing modes.
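// Roughly: [base reg + index reg * scale + sign-extended 32-bit
// displacement], optionally with a global symbol folded into the
// displacement or addressed RIP-relatively.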
28577 CodeModel::Model M = getTargetMachine().getCodeModel();
28579 // X86 allows a sign-extended 32-bit immediate field as a displacement.
28580 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;

if (AM.BaseGV) {
unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
28586 // If a reference to this global requires an extra load, we can't fold it.
28587 if (isGlobalStubReference(GVFlags))
28590 // If BaseGV requires a register for the PIC base, we cannot also have a
28591 // BaseReg specified.
28592 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
return false;
}

// If lower 4G is not available, then we must use rip-relative addressing.
28596 if ((M != CodeModel::Small || isPositionIndependent()) &&
28597 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
28601 switch (AM.Scale) {
case 0:
case 1:
case 2:
case 4:
case 8:
// These scales always work.
break;
case 3:
case 5:
case 9:
// These scales are formed with basereg+scalereg. Only accept if there is
// no basereg yet.
if (AM.HasBaseReg)
  return false;
break;
28617 default: // Other stuff never works.
28624 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
28625 unsigned Bits = Ty->getScalarSizeInBits();
28627 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
28628 // particularly cheaper than those without.
28632 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
28633 if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
28634 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
28637 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
28638 // shifts just as cheap as scalar ones.
28639 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
28642 // AVX512BW has shifts such as vpsllvw.
28643 if (Subtarget.hasBWI() && Bits == 16)
28646 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
28647 // fully general vector.
28651 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
28653 // These are non-commutative binops.
28654 // TODO: Add more X86ISD opcodes once we have test coverage.
28655 case X86ISD::ANDNP:
28656 case X86ISD::PCMPGT:
28659 case X86ISD::FANDN:
28663 return TargetLoweringBase::isBinOp(Opcode);
28666 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
28668 // TODO: Add more X86ISD opcodes once we have test coverage.
28669 case X86ISD::PCMPEQ:
28670 case X86ISD::PMULDQ:
28671 case X86ISD::PMULUDQ:
28672 case X86ISD::FMAXC:
28673 case X86ISD::FMINC:
28680 return TargetLoweringBase::isCommutativeBinOp(Opcode);
28683 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
28684 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
28686 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
28687 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
28688 return NumBits1 > NumBits2;
28691 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
28692 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
28695 if (!isTypeLegal(EVT::getEVT(Ty1)))
28698 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
28700 // Assuming the caller doesn't have a zeroext or signext return parameter,
28701 // truncation all the way down to i1 is valid.
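// CMP instructions take a sign-extended 32-bit immediate, so any value that
// fits in a signed 32-bit field can be used directly in a compare.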
28705 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
28706 return isInt<32>(Imm);
28709 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
28710 // Can also use sub to handle negated immediates.
28711 return isInt<32>(Imm);
28714 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
28715 return isInt<32>(Imm);
28718 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
28719 if (!VT1.isInteger() || !VT2.isInteger())
28721 unsigned NumBits1 = VT1.getSizeInBits();
28722 unsigned NumBits2 = VT2.getSizeInBits();
28723 return NumBits1 > NumBits2;
28726 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
28727 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
28728 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
28731 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
28732 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
28733 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
28736 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
28737 EVT VT1 = Val.getValueType();
28738 if (isZExtFree(VT1, VT2))
28741 if (Val.getOpcode() != ISD::LOAD)
28744 if (!VT1.isSimple() || !VT1.isInteger() ||
28745 !VT2.isSimple() || !VT2.isInteger())
28748 switch (VT1.getSimpleVT().SimpleTy) {
28753 // X86 has 8, 16, and 32-bit zero-extending loads.
28760 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
28761 EVT SrcVT = ExtVal.getOperand(0).getValueType();
28763 // There is no extending load for vXi1.
28764 if (SrcVT.getScalarType() == MVT::i1)
28771 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
28772 if (!Subtarget.hasAnyFMA())
28775 VT = VT.getScalarType();
28777 if (!VT.isSimple())
28780 switch (VT.getSimpleVT().SimpleTy) {
28791 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
28792 // i16 instructions are longer (0x66 prefix) and potentially slower.
28793 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
28796 /// Targets can use this to indicate that they only support *some*
28797 /// VECTOR_SHUFFLE operations, those with specific masks.
28798 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
28799 /// are assumed to be legal.
28800 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
28801 if (!VT.isSimple())
28804 // Not for i1 vectors
28805 if (VT.getSimpleVT().getScalarType() == MVT::i1)
28808 // Very little shuffling can be done for 64-bit vectors right now.
28809 if (VT.getSimpleVT().getSizeInBits() == 64)
28812 // We only care that the types being shuffled are legal. The lowering can
28813 // handle any possible shuffle mask that results.
28814 return isTypeLegal(VT.getSimpleVT());
28817 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
28819 // Don't convert an 'and' into a shuffle that we don't directly support.
28820 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
28821 if (!Subtarget.hasAVX2())
28822 if (VT == MVT::v32i8 || VT == MVT::v16i16)
28825 // Just delegate to the generic legality, clear masks aren't special.
28826 return isShuffleMaskLegal(Mask, VT);
28829 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
28830 // If the subtarget is using retpolines, we must not generate jump tables.
28831 if (Subtarget.useRetpolineIndirectBranches())
28834 // Otherwise, fallback on the generic logic.
28835 return TargetLowering::areJTsAllowed(Fn);
28838 //===----------------------------------------------------------------------===//
28839 // X86 Scheduler Hooks
28840 //===----------------------------------------------------------------------===//
28842 /// Utility function to emit xbegin specifying the start of an RTM region.
28843 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
28844 const TargetInstrInfo *TII) {
28845 DebugLoc DL = MI.getDebugLoc();
28847 const BasicBlock *BB = MBB->getBasicBlock();
28848 MachineFunction::iterator I = ++MBB->getIterator();
28850 // For the v = xbegin(), we generate
28859 // eax = # XABORT_DEF
28863 // v = phi(s0/mainBB, s1/fallBB)
28865 MachineBasicBlock *thisMBB = MBB;
28866 MachineFunction *MF = MBB->getParent();
28867 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
28868 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
28869 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
28870 MF->insert(I, mainMBB);
28871 MF->insert(I, fallMBB);
28872 MF->insert(I, sinkMBB);
28874 // Transfer the remainder of BB and its successor edges to sinkMBB.
28875 sinkMBB->splice(sinkMBB->begin(), MBB,
28876 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
28877 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
28879 MachineRegisterInfo &MRI = MF->getRegInfo();
28880 unsigned DstReg = MI.getOperand(0).getReg();
28881 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
28882 unsigned mainDstReg = MRI.createVirtualRegister(RC);
28883 unsigned fallDstReg = MRI.createVirtualRegister(RC);
28887 // # fallthrough to mainMBB
28888 // # abort path to fallMBB
28889 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
28890 thisMBB->addSuccessor(mainMBB);
28891 thisMBB->addSuccessor(fallMBB);
28894 // mainDstReg := -1
28895 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
28896 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
28897 mainMBB->addSuccessor(sinkMBB);
28900 // ; pseudo instruction to model hardware's definition from XABORT
28901 // EAX := XABORT_DEF
28902 // fallDstReg := EAX
28903 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
28904 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
28906 fallMBB->addSuccessor(sinkMBB);
28909 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
28910 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
28911 .addReg(mainDstReg).addMBB(mainMBB)
28912 .addReg(fallDstReg).addMBB(fallMBB);
28914 MI.eraseFromParent();
28920 MachineBasicBlock *
28921 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
28922 MachineBasicBlock *MBB) const {
28923 // Emit va_arg instruction on X86-64.
28925 // Operands to this pseudo-instruction:
28926 // 0 ) Output : destination address (reg)
28927 // 1-5) Input : va_list address (addr, i64mem)
28928 // 6 ) ArgSize : Size (in bytes) of vararg type
28929 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
28930 // 8 ) Align : Alignment of type
28931 // 9 ) EFLAGS (implicit-def)
28933 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
28934 static_assert(X86::AddrNumOperands == 5,
28935 "VAARG_64 assumes 5 address operands");
28937 unsigned DestReg = MI.getOperand(0).getReg();
28938 MachineOperand &Base = MI.getOperand(1);
28939 MachineOperand &Scale = MI.getOperand(2);
28940 MachineOperand &Index = MI.getOperand(3);
28941 MachineOperand &Disp = MI.getOperand(4);
28942 MachineOperand &Segment = MI.getOperand(5);
28943 unsigned ArgSize = MI.getOperand(6).getImm();
28944 unsigned ArgMode = MI.getOperand(7).getImm();
28945 unsigned Align = MI.getOperand(8).getImm();
28947 MachineFunction *MF = MBB->getParent();
28949 // Memory Reference
28950 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
28952 MachineMemOperand *OldMMO = MI.memoperands().front();
28954 // Clone the MMO into two separate MMOs for loading and storing
28955 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
28956 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
28957 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
28958 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
28960 // Machine Information
28961 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
28962 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
28963 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
28964 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
28965 DebugLoc DL = MI.getDebugLoc();
28967 // struct va_list {
28970 // i64 overflow_area (address)
28971 // i64 reg_save_area (address)
28973 // sizeof(va_list) = 24
28974 // alignment(va_list) = 8
28976 unsigned TotalNumIntRegs = 6;
28977 unsigned TotalNumXMMRegs = 8;
28978 bool UseGPOffset = (ArgMode == 1);
28979 bool UseFPOffset = (ArgMode == 2);
28980 unsigned MaxOffset = TotalNumIntRegs * 8 +
28981 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
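// With only GP registers in play MaxOffset is 6*8 = 48; when FP registers are
// used it is 48 + 8*16 = 176, matching the SysV AMD64 register save area.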
28983 // Align ArgSize to a multiple of 8.
28984 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
28985 bool NeedsAlign = (Align > 8);
28987 MachineBasicBlock *thisMBB = MBB;
28988 MachineBasicBlock *overflowMBB;
28989 MachineBasicBlock *offsetMBB;
28990 MachineBasicBlock *endMBB;
28992 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
28993 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
28994 unsigned OffsetReg = 0;
28996 if (!UseGPOffset && !UseFPOffset) {
28997 // If we only pull from the overflow region, we don't create a branch.
28998 // We don't need to alter control flow.
28999 OffsetDestReg = 0; // unused
29000 OverflowDestReg = DestReg;
29002 offsetMBB = nullptr;
29003 overflowMBB = thisMBB;
29006 // First emit code to check if gp_offset (or fp_offset) is below the bound.
29007 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
29008 // If not, pull from overflow_area. (branch to overflowMBB)
29013 // offsetMBB overflowMBB
29018 // Registers for the PHI in endMBB
29019 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
29020 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
29022 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
29023 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29024 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29025 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29027 MachineFunction::iterator MBBIter = ++MBB->getIterator();
29029 // Insert the new basic blocks
29030 MF->insert(MBBIter, offsetMBB);
29031 MF->insert(MBBIter, overflowMBB);
29032 MF->insert(MBBIter, endMBB);
29034 // Transfer the remainder of MBB and its successor edges to endMBB.
29035 endMBB->splice(endMBB->begin(), thisMBB,
29036 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
29037 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
29039 // Make offsetMBB and overflowMBB successors of thisMBB
29040 thisMBB->addSuccessor(offsetMBB);
29041 thisMBB->addSuccessor(overflowMBB);
29043 // endMBB is a successor of both offsetMBB and overflowMBB
29044 offsetMBB->addSuccessor(endMBB);
29045 overflowMBB->addSuccessor(endMBB);
29047 // Load the offset value into a register
29048 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
29049 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
29053 .addDisp(Disp, UseFPOffset ? 4 : 0)
29055 .setMemRefs(LoadOnlyMMO);
29057 // Check if there is enough room left to pull this argument.
29058 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
29060 .addImm(MaxOffset + 8 - ArgSizeA8);
29062 // Branch to "overflowMBB" if offset >= max
29063 // Fall through to "offsetMBB" otherwise
29064 BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
29065 .addMBB(overflowMBB).addImm(X86::COND_AE);
29068 // In offsetMBB, emit code to use the reg_save_area.
29070 assert(OffsetReg != 0);
29072 // Read the reg_save_area address.
29073 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
29074 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
29080 .setMemRefs(LoadOnlyMMO);
29082 // Zero-extend the offset
29083 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
29084 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
29087 .addImm(X86::sub_32bit);
29089 // Add the offset to the reg_save_area to get the final address.
29090 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
29091 .addReg(OffsetReg64)
29092 .addReg(RegSaveReg);
29094 // Compute the offset for the next argument
29095 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
29096 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
29098 .addImm(UseFPOffset ? 16 : 8);
29100 // Store it back into the va_list.
29101 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
29105 .addDisp(Disp, UseFPOffset ? 4 : 0)
29107 .addReg(NextOffsetReg)
29108 .setMemRefs(StoreOnlyMMO);
29111 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
29116 // Emit code to use overflow area
29119 // Load the overflow_area address into a register.
29120 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
29121 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
29127 .setMemRefs(LoadOnlyMMO);
29129 // If we need to align it, do so. Otherwise, just copy the address
29130 // to OverflowDestReg.
29132 // Align the overflow address
29133 assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
29134 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
29136 // aligned_addr = (addr + (align-1)) & ~(align-1)
29137 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
29138 .addReg(OverflowAddrReg)
29141 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
29143 .addImm(~(uint64_t)(Align-1));
29145 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
29146 .addReg(OverflowAddrReg);
29149 // Compute the next overflow address after this argument.
29150 // (the overflow address should be kept 8-byte aligned)
29151 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
29152 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
29153 .addReg(OverflowDestReg)
29154 .addImm(ArgSizeA8);
29156 // Store the new overflow address.
29157 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
29163 .addReg(NextAddrReg)
29164 .setMemRefs(StoreOnlyMMO);
29166 // If we branched, emit the PHI to the front of endMBB.
29168 BuildMI(*endMBB, endMBB->begin(), DL,
29169 TII->get(X86::PHI), DestReg)
29170 .addReg(OffsetDestReg).addMBB(offsetMBB)
29171 .addReg(OverflowDestReg).addMBB(overflowMBB);
29174 // Erase the pseudo instruction
29175 MI.eraseFromParent();
29180 MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
29181 MachineInstr &MI, MachineBasicBlock *MBB) const {
29182 // Emit code to save XMM registers to the stack. The ABI says that the
29183 // number of registers to save is given in %al, so it's theoretically
29184 // possible to do an indirect jump trick to avoid saving all of them,
29185 // however this code takes a simpler approach and just executes all
29186 // of the stores if %al is non-zero. It's less code, and it's probably
29187 // easier on the hardware branch predictor, and stores aren't all that
29188 // expensive anyway.
29190 // Create the new basic blocks. One block contains all the XMM stores,
29191 // and one block is the final destination regardless of whether any
29192 // stores were performed.
29193 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
29194 MachineFunction *F = MBB->getParent();
29195 MachineFunction::iterator MBBIter = ++MBB->getIterator();
29196 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
29197 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
29198 F->insert(MBBIter, XMMSaveMBB);
29199 F->insert(MBBIter, EndMBB);
29201 // Transfer the remainder of MBB and its successor edges to EndMBB.
29202 EndMBB->splice(EndMBB->begin(), MBB,
29203 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
29204 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
29206 // The original block will now fall through to the XMM save block.
29207 MBB->addSuccessor(XMMSaveMBB);
29208 // The XMMSaveMBB will fall through to the end block.
29209 XMMSaveMBB->addSuccessor(EndMBB);
29211 // Now add the instructions.
29212 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29213 DebugLoc DL = MI.getDebugLoc();
29215 unsigned CountReg = MI.getOperand(0).getReg();
29216 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
29217 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
29219 if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
29220 // If %al is 0, branch around the XMM save block.
29221 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
29222 BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
29223 MBB->addSuccessor(EndMBB);
29226 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
29227 // that was just emitted, but clearly shouldn't be "saved".
29228 assert((MI.getNumOperands() <= 3 ||
29229 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
29230 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
29231 "Expected last argument to be EFLAGS");
29232 unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
29233 // In the XMM save block, save all the XMM argument registers.
29234 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
29235 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
29236 MachineMemOperand *MMO = F->getMachineMemOperand(
29237 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
29238 MachineMemOperand::MOStore,
29239 /*Size=*/16, /*Align=*/16);
29240 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
29241 .addFrameIndex(RegSaveFrameIndex)
29242 .addImm(/*Scale=*/1)
29243 .addReg(/*IndexReg=*/0)
29244 .addImm(/*Disp=*/Offset)
29245 .addReg(/*Segment=*/0)
29246 .addReg(MI.getOperand(i).getReg())
29247 .addMemOperand(MMO);
29250 MI.eraseFromParent(); // The pseudo instruction is gone now.
29255 // The EFLAGS operand of SelectItr might be missing a kill marker
29256 // because there were multiple uses of EFLAGS, and ISel didn't know
29257 // which to mark. Figure out whether SelectItr should have had a
29258 // kill marker, and set it if it should. Returns the correct kill marker value.
29260 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
29261 MachineBasicBlock* BB,
29262 const TargetRegisterInfo* TRI) {
29263 // Scan forward through BB for a use/def of EFLAGS.
29264 MachineBasicBlock::iterator miI(std::next(SelectItr));
29265 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
29266 const MachineInstr& mi = *miI;
29267 if (mi.readsRegister(X86::EFLAGS))
29269 if (mi.definesRegister(X86::EFLAGS))
29270 break; // Should have kill-flag - update below.
29273 // If we hit the end of the block, check whether EFLAGS is live into a
29275 if (miI == BB->end()) {
29276 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
29277 sEnd = BB->succ_end();
29278 sItr != sEnd; ++sItr) {
29279 MachineBasicBlock* succ = *sItr;
29280 if (succ->isLiveIn(X86::EFLAGS))
29285 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
29286 // out. SelectMI should have a kill flag on EFLAGS.
29287 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
29291 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
29292 // together with other CMOV pseudo-opcodes into a single basic-block with
29293 // conditional jump around it.
29294 static bool isCMOVPseudo(MachineInstr &MI) {
29295 switch (MI.getOpcode()) {
29296 case X86::CMOV_FR32:
29297 case X86::CMOV_FR64:
29298 case X86::CMOV_GR8:
29299 case X86::CMOV_GR16:
29300 case X86::CMOV_GR32:
29301 case X86::CMOV_RFP32:
29302 case X86::CMOV_RFP64:
29303 case X86::CMOV_RFP80:
29304 case X86::CMOV_VR128:
29305 case X86::CMOV_VR128X:
29306 case X86::CMOV_VR256:
29307 case X86::CMOV_VR256X:
29308 case X86::CMOV_VR512:
29309 case X86::CMOV_VK2:
29310 case X86::CMOV_VK4:
29311 case X86::CMOV_VK8:
29312 case X86::CMOV_VK16:
29313 case X86::CMOV_VK32:
29314 case X86::CMOV_VK64:
29322 // Helper function, which inserts PHI functions into SinkMBB:
29323 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
29324 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
29325 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
29326 // the last PHI inserted.
29327 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
29328 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
29329 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
29330 MachineBasicBlock *SinkMBB) {
29331 MachineFunction *MF = TrueMBB->getParent();
29332 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
29333 DebugLoc DL = MIItBegin->getDebugLoc();
29335 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
29336 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
29338 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
29340 // As we are creating the PHIs, we have to be careful if there is more than
29341 // one. Later CMOVs may reference the results of earlier CMOVs, but later
29342 // PHIs have to reference the individual true/false inputs from earlier PHIs.
29343 // That also means that PHI construction must work forward from earlier to
29345 // later, and that the code must maintain a mapping from each earlier PHI's
29346 // destination register to the registers that went into that PHI.
29346 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
29347 MachineInstrBuilder MIB;
29349 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
29350 unsigned DestReg = MIIt->getOperand(0).getReg();
29351 unsigned Op1Reg = MIIt->getOperand(1).getReg();
29352 unsigned Op2Reg = MIIt->getOperand(2).getReg();
29354 // If this CMOV we are generating is the opposite condition from
29355 // the jump we generated, then we have to swap the operands for the
29356 // PHI that is going to be generated.
29357 if (MIIt->getOperand(3).getImm() == OppCC)
29358 std::swap(Op1Reg, Op2Reg);
29360 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
29361 Op1Reg = RegRewriteTable[Op1Reg].first;
29363 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
29364 Op2Reg = RegRewriteTable[Op2Reg].second;
29366 MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
29372 // Add this PHI to the rewrite table.
29373 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
29379 // Lower cascaded selects of the form (SecondCMOV (FirstCMOV F, T, cc1), T, cc2).
29380 MachineBasicBlock *
29381 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
29382 MachineInstr &SecondCascadedCMOV,
29383 MachineBasicBlock *ThisMBB) const {
29384 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29385 DebugLoc DL = FirstCMOV.getDebugLoc();
29387 // We lower cascaded CMOVs such as
29389 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
29391 // to two successive branches.
29393 // Without this, we would add a PHI between the two jumps, which ends up
29394 // creating a few copies all around. For instance, for
29396 // (sitofp (zext (fcmp une)))
29398 // we would generate:
29400 // ucomiss %xmm1, %xmm0
29401 // movss <1.0f>, %xmm0
29402 // movaps %xmm0, %xmm1
29404 // xorps %xmm1, %xmm1
29407 // movaps %xmm1, %xmm0
29411 // because this custom-inserter would have generated:
29423 // A: X = ...; Y = ...
29425 // C: Z = PHI [X, A], [Y, B]
29427 // E: PHI [X, C], [Z, D]
29429 // If we lower both CMOVs in a single step, we can instead generate:
29441 // A: X = ...; Y = ...
29443 // E: PHI [X, A], [X, C], [Y, D]
29445 // Which, in our sitofp/fcmp example, gives us something like:
29447 // ucomiss %xmm1, %xmm0
29448 // movss <1.0f>, %xmm0
29451 // xorps %xmm0, %xmm0
29456 // We lower cascaded CMOV into two successive branches to the same block.
29457 // EFLAGS is used by both, so mark it as live in the second.
29458 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
29459 MachineFunction *F = ThisMBB->getParent();
29460 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
29461 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
29462 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
29464 MachineFunction::iterator It = ++ThisMBB->getIterator();
29465 F->insert(It, FirstInsertedMBB);
29466 F->insert(It, SecondInsertedMBB);
29467 F->insert(It, SinkMBB);
29469 // For a cascaded CMOV, we lower it to two successive branches to
29470 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
29471 // the FirstInsertedMBB.
29472 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
29474 // If the EFLAGS register isn't dead in the terminator, then claim that it's
29475 // live into the sink and copy blocks.
29476 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29477 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
29478 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
29479 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
29480 SinkMBB->addLiveIn(X86::EFLAGS);
29483 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
29484 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
29485 std::next(MachineBasicBlock::iterator(FirstCMOV)),
29487 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
29489 // Fallthrough block for ThisMBB.
29490 ThisMBB->addSuccessor(FirstInsertedMBB);
29491 // The true block target of the first branch is always SinkMBB.
29492 ThisMBB->addSuccessor(SinkMBB);
29493 // Fallthrough block for FirstInsertedMBB.
29494 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
29495 // The true block for the branch of FirstInsertedMBB.
29496 FirstInsertedMBB->addSuccessor(SinkMBB);
29497 // This is fallthrough.
29498 SecondInsertedMBB->addSuccessor(SinkMBB);
29500 // Create the conditional branch instructions.
29501 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
29502 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
29504 X86::CondCode SecondCC =
29505 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
29506 BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
29509 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
29510 unsigned DestReg = FirstCMOV.getOperand(0).getReg();
29511 unsigned Op1Reg = FirstCMOV.getOperand(1).getReg();
29512 unsigned Op2Reg = FirstCMOV.getOperand(2).getReg();
29513 MachineInstrBuilder MIB =
29514 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
29516 .addMBB(SecondInsertedMBB)
29520 // FirstInsertedMBB provides the same incoming value as ThisMBB (the True
29521 // operand of the SELECT_CC/CMOV nodes).
29522 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
29523 // Copy the PHI result to the register defined by the second CMOV.
29524 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
29525 TII->get(TargetOpcode::COPY),
29526 SecondCascadedCMOV.getOperand(0).getReg())
29527 .addReg(FirstCMOV.getOperand(0).getReg());
29529 // Now remove the CMOVs.
29530 FirstCMOV.eraseFromParent();
29531 SecondCascadedCMOV.eraseFromParent();
29536 MachineBasicBlock *
29537 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
29538 MachineBasicBlock *ThisMBB) const {
29539 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29540 DebugLoc DL = MI.getDebugLoc();
29542 // To "insert" a SELECT_CC instruction, we actually have to insert the
29543 // diamond control-flow pattern. The incoming instruction knows the
29544 // destination vreg to set, the condition code register to branch on, the
29545 // true/false values to select between and a branch opcode to use.
29550 // cmpTY ccX, r1, r2
29552 // fallthrough --> FalseMBB
29554 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
29555 // as described above, by inserting a BB, and then making a PHI at the join
29556 // point to select the true and false operands of the CMOV in the PHI.
29558 // The code also handles two different cases of multiple CMOV opcodes
29562 // In this case, there are multiple CMOVs in a row, all which are based on
29563 // the same condition setting (or the exact opposite condition setting).
29564 // In this case we can lower all the CMOVs using a single inserted BB, and
29565 // then make a number of PHIs at the join point to model the CMOVs. The only
29566 // trickiness here, is that in a case like:
29568 // t2 = CMOV cond1 t1, f1
29569 // t3 = CMOV cond1 t2, f2
29571 // when rewriting this into PHIs, we have to perform some renaming on the
29572 // temps since you cannot have a PHI operand refer to a PHI result earlier
29573 // in the same block. The "simple" but wrong lowering would be:
29575 // t2 = PHI t1(BB1), f1(BB2)
29576 // t3 = PHI t2(BB1), f2(BB2)
29578 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
29579 // renaming is to note that on the path through BB1, t2 is really just a
29580 // copy of t1, and do that renaming, properly generating:
29582 // t2 = PHI t1(BB1), f1(BB2)
29583 // t3 = PHI t1(BB1), f2(BB2)
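// createPHIsForCMOVsInSinkBB (defined earlier) performs exactly this renaming
// via its RegRewriteTable.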
29586 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
29587 // function - EmitLoweredCascadedSelect.
29589 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
29590 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
29591 MachineInstr *LastCMOV = &MI;
29592 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
29594 // Check for case 1, where there are multiple CMOVs with the same condition
29595 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
29596 // number of jumps the most.
29598 if (isCMOVPseudo(MI)) {
29599 // See if we have a string of CMOVs with the same condition. Skip over
29600 // intervening debug insts.
29601 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
29602 (NextMIIt->getOperand(3).getImm() == CC ||
29603 NextMIIt->getOperand(3).getImm() == OppCC)) {
29604 LastCMOV = &*NextMIIt;
29606 NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
29610 // Check for case 2, but only if we didn't already find case 1, as
29611 // indicated by LastCMOV still pointing at MI.
29612 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
29613 NextMIIt->getOpcode() == MI.getOpcode() &&
29614 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
29615 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
29616 NextMIIt->getOperand(1).isKill()) {
29617 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
29620 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
29621 MachineFunction *F = ThisMBB->getParent();
29622 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
29623 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
29625 MachineFunction::iterator It = ++ThisMBB->getIterator();
29626 F->insert(It, FalseMBB);
29627 F->insert(It, SinkMBB);
29629 // If the EFLAGS register isn't dead in the terminator, then claim that it's
29630 // live into the sink and copy blocks.
29631 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29632 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
29633 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
29634 FalseMBB->addLiveIn(X86::EFLAGS);
29635 SinkMBB->addLiveIn(X86::EFLAGS);
29638 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
29639 auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
29640 auto DbgIt = MachineBasicBlock::iterator(MI);
29641 while (DbgIt != DbgEnd) {
29642 auto Next = std::next(DbgIt);
29643 if (DbgIt->isDebugInstr())
29644 SinkMBB->push_back(DbgIt->removeFromParent());
29648 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
29649 SinkMBB->splice(SinkMBB->end(), ThisMBB,
29650 std::next(MachineBasicBlock::iterator(LastCMOV)),
29652 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
29654 // Fallthrough block for ThisMBB.
29655 ThisMBB->addSuccessor(FalseMBB);
29656 // The true block target of the first (or only) branch is always SinkMBB.
29657 ThisMBB->addSuccessor(SinkMBB);
29658 // Fallthrough block for FalseMBB.
29659 FalseMBB->addSuccessor(SinkMBB);
29661 // Create the conditional branch instruction.
29662 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
29665 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
29667 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
29668 MachineBasicBlock::iterator MIItEnd =
29669 std::next(MachineBasicBlock::iterator(LastCMOV));
29670 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
29672 // Now remove the CMOV(s).
29673 ThisMBB->erase(MIItBegin, MIItEnd);
29678 MachineBasicBlock *
29679 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
29680 MachineBasicBlock *BB) const {
29681 MachineFunction *MF = BB->getParent();
29682 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29683 DebugLoc DL = MI.getDebugLoc();
29684 const BasicBlock *LLVM_BB = BB->getBasicBlock();
29686 assert(MF->shouldSplitStack());
29688 const bool Is64Bit = Subtarget.is64Bit();
29689 const bool IsLP64 = Subtarget.isTarget64BitLP64();
29691 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
29692 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
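// These offsets locate the split-stack limit word in the TLS area, following
// the libgcc -fsplit-stack ABI (%fs:0x70 on LP64, %fs:0x40 on x32,
// %gs:0x30 on 32-bit).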
29695 // ... [Till the alloca]
29696 // If stacklet is not large enough, jump to mallocMBB
29699 // Allocate by subtracting from RSP
29700 // Jump to continueMBB
29703 // Allocate by call to runtime
29707 // [rest of original BB]
29710 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29711 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29712 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29714 MachineRegisterInfo &MRI = MF->getRegInfo();
29715 const TargetRegisterClass *AddrRegClass =
29716 getRegClassFor(getPointerTy(MF->getDataLayout()));
29718 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
29719 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
29720 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
29721 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
29722 sizeVReg = MI.getOperand(1).getReg(),
29724 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
29726 MachineFunction::iterator MBBIter = ++BB->getIterator();
29728 MF->insert(MBBIter, bumpMBB);
29729 MF->insert(MBBIter, mallocMBB);
29730 MF->insert(MBBIter, continueMBB);
29732 continueMBB->splice(continueMBB->begin(), BB,
29733 std::next(MachineBasicBlock::iterator(MI)), BB->end());
29734 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
29736 // Add code to the main basic block to check if the stack limit has been hit,
29737 // and if so, jump to mallocMBB otherwise to bumpMBB.
29738 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
29739 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
29740 .addReg(tmpSPVReg).addReg(sizeVReg);
29741 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
29742 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
29743 .addReg(SPLimitVReg);
29744 BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
29746 // bumpMBB simply decreases the stack pointer, since we know the current
29747 // stacklet has enough space.
29748 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
29749 .addReg(SPLimitVReg);
29750 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
29751 .addReg(SPLimitVReg);
29752 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
29754 // Calls into a routine in libgcc to allocate more space from the heap.
29755 const uint32_t *RegMask =
29756 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
29758 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
29760 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
29761 .addExternalSymbol("__morestack_allocate_stack_space")
29762 .addRegMask(RegMask)
29763 .addReg(X86::RDI, RegState::Implicit)
29764 .addReg(X86::RAX, RegState::ImplicitDefine);
29765 } else if (Is64Bit) {
29766 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
29768 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
29769 .addExternalSymbol("__morestack_allocate_stack_space")
29770 .addRegMask(RegMask)
29771 .addReg(X86::EDI, RegState::Implicit)
29772 .addReg(X86::EAX, RegState::ImplicitDefine);
29774 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
29776 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
29777 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
29778 .addExternalSymbol("__morestack_allocate_stack_space")
29779 .addRegMask(RegMask)
29780 .addReg(X86::EAX, RegState::ImplicitDefine);
29784 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
29787 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
29788 .addReg(IsLP64 ? X86::RAX : X86::EAX);
29789 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
29791 // Set up the CFG correctly.
29792 BB->addSuccessor(bumpMBB);
29793 BB->addSuccessor(mallocMBB);
29794 mallocMBB->addSuccessor(continueMBB);
29795 bumpMBB->addSuccessor(continueMBB);
29797 // Take care of the PHI nodes.
29798 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
29799 MI.getOperand(0).getReg())
29800 .addReg(mallocPtrVReg)
29802 .addReg(bumpSPPtrVReg)
29805 // Delete the original pseudo instruction.
29806 MI.eraseFromParent();
29809 return continueMBB;
29812 MachineBasicBlock *
29813 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
29814 MachineBasicBlock *BB) const {
29815 MachineFunction *MF = BB->getParent();
29816 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
29817 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
29818 DebugLoc DL = MI.getDebugLoc();
29820 assert(!isAsynchronousEHPersonality(
29821 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
29822 "SEH does not use catchret!");
29824 // Only 32-bit EH needs to worry about manually restoring stack pointers.
29825 if (!Subtarget.is32Bit())
29828 // C++ EH creates a new target block to hold the restore code, and wires up
29829 // the new block to the return destination with a normal JMP_4.
29830 MachineBasicBlock *RestoreMBB =
29831 MF->CreateMachineBasicBlock(BB->getBasicBlock());
29832 assert(BB->succ_size() == 1);
29833 MF->insert(std::next(BB->getIterator()), RestoreMBB);
29834 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
29835 BB->addSuccessor(RestoreMBB);
29836 MI.getOperand(0).setMBB(RestoreMBB);
29838 auto RestoreMBBI = RestoreMBB->begin();
29839 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
29840 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
29844 MachineBasicBlock *
29845 X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
29846 MachineBasicBlock *BB) const {
29847 MachineFunction *MF = BB->getParent();
29848 const Constant *PerFn = MF->getFunction().getPersonalityFn();
29849 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
29850 // Only 32-bit SEH requires special handling for catchpad.
29851 if (IsSEH && Subtarget.is32Bit()) {
29852 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
29853 DebugLoc DL = MI.getDebugLoc();
29854 BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
29856 MI.eraseFromParent();
29860 MachineBasicBlock *
29861 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
29862 MachineBasicBlock *BB) const {
29863 // So, here we replace TLSADDR with the sequence:
29864 // adjust_stackdown -> TLSADDR -> adjust_stackup.
29865 // We need this because TLSADDR is lowered into a call
29866 // inside MC; without the two markers, shrink-wrapping
29867 // may push the prologue/epilogue past them.
29868 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
29869 DebugLoc DL = MI.getDebugLoc();
29870 MachineFunction &MF = *BB->getParent();
29872 // Emit CALLSEQ_START right before the instruction.
29873 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
29874 MachineInstrBuilder CallseqStart =
29875 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
29876 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
29878 // Emit CALLSEQ_END right after the instruction.
29879 // We don't call erase from parent because we want to keep the
29880 // original instruction around.
29881 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
29882 MachineInstrBuilder CallseqEnd =
29883 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
29884 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
29889 MachineBasicBlock *
29890 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
29891 MachineBasicBlock *BB) const {
29892 // This is pretty easy. We're taking the value that we received from
29893 // our load from the relocation, sticking it in either RDI (x86-64)
29894 // or EAX and doing an indirect call. The return value will then
29895 // be in the normal return register.
29896 MachineFunction *F = BB->getParent();
29897 const X86InstrInfo *TII = Subtarget.getInstrInfo();
29898 DebugLoc DL = MI.getDebugLoc();
29900 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
29901 assert(MI.getOperand(3).isGlobal() && "This should be a global");
29903 // Get a register mask for the lowered call.
29904 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
29905 // proper register mask.
29906 const uint32_t *RegMask =
29907 Subtarget.is64Bit() ?
29908 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
29909 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
29910 if (Subtarget.is64Bit()) {
29911 MachineInstrBuilder MIB =
29912 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
29916 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
29917 MI.getOperand(3).getTargetFlags())
29919 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
29920 addDirectMem(MIB, X86::RDI);
29921 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
29922 } else if (!isPositionIndependent()) {
29923 MachineInstrBuilder MIB =
29924 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
29928 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
29929 MI.getOperand(3).getTargetFlags())
29931 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
29932 addDirectMem(MIB, X86::EAX);
29933 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
29935 MachineInstrBuilder MIB =
29936 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
29937 .addReg(TII->getGlobalBaseReg(F))
29940 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
29941 MI.getOperand(3).getTargetFlags())
29943 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
29944 addDirectMem(MIB, X86::EAX);
29945 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
29948 MI.eraseFromParent(); // The pseudo instruction is gone now.
29952 static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
29954 case X86::RETPOLINE_CALL32:
29955 return X86::CALLpcrel32;
29956 case X86::RETPOLINE_CALL64:
29957 return X86::CALL64pcrel32;
29958 case X86::RETPOLINE_TCRETURN32:
29959 return X86::TCRETURNdi;
29960 case X86::RETPOLINE_TCRETURN64:
29961 return X86::TCRETURNdi64;
29963 llvm_unreachable("not retpoline opcode");
29966 static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
29968 if (Subtarget.useRetpolineExternalThunk()) {
29969 // When using an external thunk for retpolines, we pick names that match the
29970 // names GCC happens to use as well. This helps simplify the implementation
29971 // of the thunks for kernels where they have no easy ability to create
29972 // aliases and are doing non-trivial configuration of the thunk's body. For
29973 // example, the Linux kernel will do boot-time hot patching of the thunk
29974 // bodies and cannot easily export aliases of these to loaded modules.
29976 // Note that at any point in the future, we may need to change the semantics
29977 // of how we implement retpolines and at that time will likely change the
29978 // name of the called thunk. Essentially, there is no hard guarantee that
29979 // LLVM will generate calls to specific thunks; we merely make a best-effort
29980 // attempt to help out kernels and other systems where duplicating the
29981 // thunks is costly.
29984 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29985 return "__x86_indirect_thunk_eax";
29987 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29988 return "__x86_indirect_thunk_ecx";
29990 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29991 return "__x86_indirect_thunk_edx";
29993 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
29994 return "__x86_indirect_thunk_edi";
29996 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
29997 return "__x86_indirect_thunk_r11";
29999 llvm_unreachable("unexpected reg for retpoline");
30002 // When targeting an internal COMDAT thunk use an LLVM-specific name.
30005 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30006 return "__llvm_retpoline_eax";
30008 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30009 return "__llvm_retpoline_ecx";
30011 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30012 return "__llvm_retpoline_edx";
30014 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30015 return "__llvm_retpoline_edi";
30017 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
30018 return "__llvm_retpoline_r11";
30020 llvm_unreachable("unexpected reg for retpoline");
30023 MachineBasicBlock *
30024 X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
30025 MachineBasicBlock *BB) const {
30026 // Copy the virtual register into the R11 physical register and
30027 // call the retpoline thunk.
30028 DebugLoc DL = MI.getDebugLoc();
30029 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30030 unsigned CalleeVReg = MI.getOperand(0).getReg();
30031 unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
30033 // Find an available scratch register to hold the callee. On 64-bit, we can
30034 // just use R11, but we scan for uses anyway to ensure we don't generate
30035 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
30036 // already a register use operand to the call to hold the callee. If none
30037 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
30038 // register and ESI is the base pointer to realigned stack frames with VLAs.
30039 SmallVector<unsigned, 3> AvailableRegs;
30040 if (Subtarget.is64Bit())
30041 AvailableRegs.push_back(X86::R11);
30043 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
30045 // Zero out any registers that are already used.
30046 for (const auto &MO : MI.operands()) {
30047 if (MO.isReg() && MO.isUse())
30048 for (unsigned &Reg : AvailableRegs)
30049 if (Reg == MO.getReg())
30053 // Choose the first remaining non-zero available register.
30054 unsigned AvailableReg = 0;
30055 for (unsigned MaybeReg : AvailableRegs) {
30057 AvailableReg = MaybeReg;
30062 report_fatal_error("calling convention incompatible with retpoline, no "
30063 "available registers");
30065 const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
30067 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
30068 .addReg(CalleeVReg);
30069 MI.getOperand(0).ChangeToES(Symbol);
30070 MI.setDesc(TII->get(Opc));
30071 MachineInstrBuilder(*BB->getParent(), &MI)
30072 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
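// The rewritten instruction now targets the thunk symbol directly; the real
// callee travels in AvailableReg, which the retpoline thunk branches through.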
30076 /// SetJmp implies a future control-flow change upon calling the corresponding function.
30078 /// Instead of using the 'return' instruction, the long jump fixes the stack and
30079 /// performs an indirect branch. To do so it uses the registers that were stored
30080 /// in the jump buffer (when calling SetJmp).
30081 /// In case the shadow stack is enabled we need to fix it as well, because some
30082 /// return addresses will be skipped.
30083 /// The function will save the SSP for future fixing in the function
30084 /// emitLongJmpShadowStackFix.
30085 /// \sa emitLongJmpShadowStackFix
30086 /// \param [in] MI The temporary Machine Instruction for the builtin.
30087 /// \param [in] MBB The Machine Basic Block that will be modified.
30088 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
30089 MachineBasicBlock *MBB) const {
30090 DebugLoc DL = MI.getDebugLoc();
30091 MachineFunction *MF = MBB->getParent();
30092 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30093 MachineRegisterInfo &MRI = MF->getRegInfo();
30094 MachineInstrBuilder MIB;
30096 // Memory Reference.
30097 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30098 MI.memoperands_end());
30100 // Initialize a register with zero.
30101 MVT PVT = getPointerTy(MF->getDataLayout());
30102 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30103 unsigned ZReg = MRI.createVirtualRegister(PtrRC);
30104 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
30105 BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
30107 .addReg(ZReg, RegState::Undef)
30108 .addReg(ZReg, RegState::Undef);
30110 // Read the current SSP Register value to the zeroed register.
30111 unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30112 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30113 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
30115 // Write the SSP register value to offset 3 in input memory buffer.
30116 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30117 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
30118 const int64_t SSPOffset = 3 * PVT.getStoreSize();
30119 const unsigned MemOpndSlot = 1;
30120 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30121 if (i == X86::AddrDisp)
30122 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
30124 MIB.add(MI.getOperand(MemOpndSlot + i));
30126 MIB.addReg(SSPCopyReg);
30127 MIB.setMemRefs(MMOs);
30130 MachineBasicBlock *
30131 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
30132 MachineBasicBlock *MBB) const {
30133 DebugLoc DL = MI.getDebugLoc();
30134 MachineFunction *MF = MBB->getParent();
30135 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30136 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30137 MachineRegisterInfo &MRI = MF->getRegInfo();
30139 const BasicBlock *BB = MBB->getBasicBlock();
30140 MachineFunction::iterator I = ++MBB->getIterator();
30142 // Memory Reference
30143 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30144 MI.memoperands_end());
30147 unsigned MemOpndSlot = 0;
30149 unsigned CurOp = 0;
30151 DstReg = MI.getOperand(CurOp++).getReg();
30152 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30153 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
30155 unsigned mainDstReg = MRI.createVirtualRegister(RC);
30156 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
30158 MemOpndSlot = CurOp;
30160 MVT PVT = getPointerTy(MF->getDataLayout());
30161 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30162 "Invalid Pointer Size!");
30164 // For v = setjmp(buf), we generate
30167 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
30168 // SjLjSetup restoreMBB
30174 // v = phi(main, restore)
30177 // if the base pointer is being used, load it from the frame
30180 MachineBasicBlock *thisMBB = MBB;
30181 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30182 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30183 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
30184 MF->insert(I, mainMBB);
30185 MF->insert(I, sinkMBB);
30186 MF->push_back(restoreMBB);
30187 restoreMBB->setHasAddressTaken();
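// restoreMBB's address is stored into the setjmp buffer below, so it must be
// marked address-taken to keep it from being removed or merged away.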
30189 MachineInstrBuilder MIB;
30191 // Transfer the remainder of BB and its successor edges to sinkMBB.
30192 sinkMBB->splice(sinkMBB->begin(), MBB,
30193 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30194 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30197 unsigned PtrStoreOpc = 0;
30198 unsigned LabelReg = 0;
30199 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30200 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30201 !isPositionIndependent();
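// With the small code model and no PIC, the address of restoreMBB fits in a
// 32-bit immediate and can be stored directly; otherwise it is materialized
// into LabelReg with an LEA below.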
30203 // Prepare IP either in reg or imm.
30204 if (!UseImmLabel) {
30205 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30206 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30207 LabelReg = MRI.createVirtualRegister(PtrRC);
30208 if (Subtarget.is64Bit()) {
30209 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
30213 .addMBB(restoreMBB)
30216 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
30217 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
30218 .addReg(XII->getGlobalBaseReg(MF))
30221 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
30225 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30227 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
30228 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30229 if (i == X86::AddrDisp)
30230 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
30232 MIB.add(MI.getOperand(MemOpndSlot + i));
30235 MIB.addReg(LabelReg);
30237 MIB.addMBB(restoreMBB);
30238 MIB.setMemRefs(MMOs);
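// Only save the shadow stack pointer when CET return protection
// ("cf-protection-return") is enabled for this module.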
30240 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
30241 emitSetJmpShadowStackFix(MI, thisMBB);
30245 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
30246 .addMBB(restoreMBB);
30248 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30249 MIB.addRegMask(RegInfo->getNoPreservedMask());
30250 thisMBB->addSuccessor(mainMBB);
30251 thisMBB->addSuccessor(restoreMBB);
30255 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
30256 mainMBB->addSuccessor(sinkMBB);
30259 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
30260 TII->get(X86::PHI), DstReg)
30261 .addReg(mainDstReg).addMBB(mainMBB)
30262 .addReg(restoreDstReg).addMBB(restoreMBB);
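// DstReg is 0 (from mainMBB) on the direct setjmp return and 1 (from
// restoreMBB) when control re-enters via longjmp.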
30265 if (RegInfo->hasBasePointer(*MF)) {
30266 const bool Uses64BitFramePtr =
30267 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30268 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
30269 X86FI->setRestoreBasePointer(MF);
30270 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
30271 unsigned BasePtr = RegInfo->getBaseRegister();
30272 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
30273 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
30274 FramePtr, true, X86FI->getRestoreBasePointerOffset())
30275 .setMIFlag(MachineInstr::FrameSetup);
30277 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
30278 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30279 restoreMBB->addSuccessor(sinkMBB);
30281 MI.eraseFromParent();
30285 /// Fix the shadow stack using the previously saved SSP pointer.
30286 /// \sa emitSetJmpShadowStackFix
30287 /// \param [in] MI The temporary Machine Instruction for the builtin.
30288 /// \param [in] MBB The Machine Basic Block that will be modified.
30289 /// \return The sink MBB that will perform the future indirect branch.
30290 MachineBasicBlock *
30291 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
30292 MachineBasicBlock *MBB) const {
30293 DebugLoc DL = MI.getDebugLoc();
30294 MachineFunction *MF = MBB->getParent();
30295 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30296 MachineRegisterInfo &MRI = MF->getRegInfo();
30298 // Memory Reference
30299 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30300 MI.memoperands_end());
30302 MVT PVT = getPointerTy(MF->getDataLayout());
30303 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30306 // xor vreg1, vreg1
30308 // test vreg1, vreg1
30309 // je sinkMBB # Jump if Shadow Stack is not supported
30311 // mov buf+24/12(%rip), vreg2
30312 // sub vreg1, vreg2
30313 // jbe sinkMBB # No need to fix the Shadow Stack
30316 // incssp vreg2 # fix the SSP according to the lower 8 bits
30319 // fixShadowLoopPrepareMBB:
30322 // fixShadowLoopMBB:
30325 // jne fixShadowLoopMBB # Iterate until you finish fixing
30326 // # the Shadow Stack
30329 MachineFunction::iterator I = ++MBB->getIterator();
30330 const BasicBlock *BB = MBB->getBasicBlock();
30332 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
30333 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30334 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
30335 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
30336 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
30337 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30338 MF->insert(I, checkSspMBB);
30339 MF->insert(I, fallMBB);
30340 MF->insert(I, fixShadowMBB);
30341 MF->insert(I, fixShadowLoopPrepareMBB);
30342 MF->insert(I, fixShadowLoopMBB);
30343 MF->insert(I, sinkMBB);
30345 // Transfer the remainder of BB and its successor edges to sinkMBB.
30346 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
30348 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30350 MBB->addSuccessor(checkSspMBB);
30352 // Initialize a register with zero.
30353 unsigned ZReg = MRI.createVirtualRegister(PtrRC);
30354 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);
30360 // Read the current SSP Register value to the zeroed register.
30361 unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30362 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30363 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
  // Check whether the result of the SSP register is zero and jump directly
  // to the sink in that case.
30367 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
30368 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
30369 .addReg(SSPCopyReg)
30370 .addReg(SSPCopyReg);
30371 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30372 checkSspMBB->addSuccessor(sinkMBB);
30373 checkSspMBB->addSuccessor(fallMBB);
30375 // Reload the previously saved SSP register value.
30376 unsigned PrevSSPReg = MRI.createVirtualRegister(PtrRC);
30377 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30378 const int64_t SPPOffset = 3 * PVT.getStoreSize();
30379 MachineInstrBuilder MIB =
30380 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
30381 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30382 const MachineOperand &MO = MI.getOperand(i);
30383 if (i == X86::AddrDisp)
30384 MIB.addDisp(MO, SPPOffset);
30385 else if (MO.isReg()) // Don't add the whole operand, we don't want to
30386 // preserve kill flags.
30387 MIB.addReg(MO.getReg());
30391 MIB.setMemRefs(MMOs);
30393 // Subtract the current SSP from the previous SSP.
30394 unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
30395 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
30396 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
30397 .addReg(PrevSSPReg)
30398 .addReg(SSPCopyReg);
30400 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
30401 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
30402 fallMBB->addSuccessor(sinkMBB);
30403 fallMBB->addSuccessor(fixShadowMBB);
30405 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
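  // For example (illustrative sketch): on x86-64 a byte delta of 0x50 (80)
  // between the saved and the current SSP corresponds to 80 >> 3 = 10
  // shadow-stack entries, which is the value handed to INCSSPQ below.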
30406 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
30407 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
30408 unsigned SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
      .addReg(SspSubReg)
      .addImm(Offset);
  // Increase the SSP, using only the lower 8 bits of the delta.
30414 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
30415 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
30417 // Reset the lower 8 bits.
30418 unsigned SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
      .addReg(SspFirstShrReg)
      .addImm(8);
30423 // Jump if the result of the shift is zero.
30424 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30425 fixShadowMBB->addSuccessor(sinkMBB);
30426 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
30428 // Do a single shift left.
30429 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
30430 unsigned SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
30431 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
30432 .addReg(SspSecondShrReg);
30434 // Save the value 128 to a register (will be used next with incssp).
30435 unsigned Value128InReg = MRI.createVirtualRegister(PtrRC);
30436 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
      .addImm(128);
30439 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
30441 // Since incssp only looks at the lower 8 bits, we might need to do several
30442 // iterations of incssp until we finish fixing the shadow stack.
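  // Worked example (a sketch): for a delta of 520 entries, the incssp above
  // consumes 520 & 255 = 8, the remaining 520 >> 8 = 2 is shifted left to 4,
  // and the loop below performs 4 iterations of incssp 128 = 512 entries,
  // for a total of 8 + 512 = 520.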
30443 unsigned DecReg = MRI.createVirtualRegister(PtrRC);
30444 unsigned CounterReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
      .addReg(SspAfterShlReg)
      .addMBB(fixShadowLoopPrepareMBB)
      .addReg(DecReg)
      .addMBB(fixShadowLoopMBB);
30451 // Every iteration we increase the SSP by 128.
30452 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
30454 // Every iteration we decrement the counter by 1.
30455 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
30456 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
30458 // Jump if the counter is not zero yet.
30459 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
30460 fixShadowLoopMBB->addSuccessor(sinkMBB);
30461 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
30466 MachineBasicBlock *
30467 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
30468 MachineBasicBlock *MBB) const {
30469 DebugLoc DL = MI.getDebugLoc();
30470 MachineFunction *MF = MBB->getParent();
30471 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30472 MachineRegisterInfo &MRI = MF->getRegInfo();
30474 // Memory Reference
30475 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30476 MI.memoperands_end());
30478 MVT PVT = getPointerTy(MF->getDataLayout());
30479 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30480 "Invalid Pointer Size!");
30482 const TargetRegisterClass *RC =
30483 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30484 unsigned Tmp = MRI.createVirtualRegister(RC);
30485 // Since FP is only updated here but NOT referenced, it's treated as GPR.
30486 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30487 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
30488 unsigned SP = RegInfo->getStackRegister();
30490 MachineInstrBuilder MIB;
30492 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30493 const int64_t SPOffset = 2 * PVT.getStoreSize();
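  // Buffer layout assumed by this expansion (matching the setjmp side):
  // slot 0 = frame pointer, slot 1 = destination address, slot 2 = stack
  // pointer, and slot 3 = saved SSP when CET shadow stacks are enabled.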
30495 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30496 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
30498 MachineBasicBlock *thisMBB = MBB;
  // When CET and shadow stacks are enabled, we need to fix the Shadow Stack.
30501 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
30502 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
30506 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
30507 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30508 const MachineOperand &MO = MI.getOperand(i);
30509 if (MO.isReg()) // Don't add the whole operand, we don't want to
30510 // preserve kill flags.
30511 MIB.addReg(MO.getReg());
30515 MIB.setMemRefs(MMOs);
30518 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
30519 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30520 const MachineOperand &MO = MI.getOperand(i);
30521 if (i == X86::AddrDisp)
30522 MIB.addDisp(MO, LabelOffset);
30523 else if (MO.isReg()) // Don't add the whole operand, we don't want to
30524 // preserve kill flags.
30525 MIB.addReg(MO.getReg());
30529 MIB.setMemRefs(MMOs);
30532 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
30533 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30534 if (i == X86::AddrDisp)
30535 MIB.addDisp(MI.getOperand(i), SPOffset);
30537 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
30538 // the last instruction of the expansion.
30540 MIB.setMemRefs(MMOs);
30543 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
30545 MI.eraseFromParent();
30549 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
30550 MachineBasicBlock *MBB,
30551 MachineBasicBlock *DispatchBB,
30553 DebugLoc DL = MI.getDebugLoc();
30554 MachineFunction *MF = MBB->getParent();
30555 MachineRegisterInfo *MRI = &MF->getRegInfo();
30556 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30558 MVT PVT = getPointerTy(MF->getDataLayout());
30559 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
30564 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30565 !isPositionIndependent();
30568 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30570 const TargetRegisterClass *TRC =
30571 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30572 VR = MRI->createVirtualRegister(TRC);
30573 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30575 if (Subtarget.is64Bit())
30576 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
30580 .addMBB(DispatchBB)
30583 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
30584 .addReg(0) /* TII->getGlobalBaseReg(MF) */
30587 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
30591 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
30592 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
30594 MIB.addMBB(DispatchBB);
30599 MachineBasicBlock *
30600 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
30601 MachineBasicBlock *BB) const {
30602 DebugLoc DL = MI.getDebugLoc();
30603 MachineFunction *MF = BB->getParent();
30604 MachineRegisterInfo *MRI = &MF->getRegInfo();
30605 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30606 int FI = MF->getFrameInfo().getFunctionContextIndex();
30608 // Get a mapping of the call site numbers to all of the landing pads they're
30609 // associated with.
30610 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
30611 unsigned MaxCSNum = 0;
30612 for (auto &MBB : *MF) {
30613 if (!MBB.isEHPad())
30616 MCSymbol *Sym = nullptr;
30617 for (const auto &MI : MBB) {
30618 if (MI.isDebugInstr())
30621 assert(MI.isEHLabel() && "expected EH_LABEL");
30622 Sym = MI.getOperand(0).getMCSymbol();
30626 if (!MF->hasCallSiteLandingPad(Sym))
30629 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
30630 CallSiteNumToLPad[CSI].push_back(&MBB);
30631 MaxCSNum = std::max(MaxCSNum, CSI);
30635 // Get an ordered list of the machine basic blocks for the jump table.
30636 std::vector<MachineBasicBlock *> LPadList;
30637 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
30638 LPadList.reserve(CallSiteNumToLPad.size());
30640 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
30641 for (auto &LP : CallSiteNumToLPad[CSI]) {
30642 LPadList.push_back(LP);
30643 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
30647 assert(!LPadList.empty() &&
30648 "No landing pad destinations for the dispatch jump table!");
30650 // Create the MBBs for the dispatch code.
30652 // Shove the dispatch's address into the return slot in the function context.
30653 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
30654 DispatchBB->setIsEHPad(true);
30656 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
30657 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
30658 DispatchBB->addSuccessor(TrapBB);
30660 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
30661 DispatchBB->addSuccessor(DispContBB);
30664 MF->push_back(DispatchBB);
30665 MF->push_back(DispContBB);
30666 MF->push_back(TrapBB);
  // Insert code into the entry block that creates and registers the function
  // context.
30670 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
30672 // Create the jump table and associated information
30673 unsigned JTE = getJumpTableEncoding();
30674 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
30675 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
30677 const X86RegisterInfo &RI = TII->getRegisterInfo();
30678 // Add a register mask with no preserved registers. This results in all
30679 // registers being marked as clobbered.
30680 if (RI.hasBasePointer(*MF)) {
30681 const bool FPIs64Bit =
30682 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30683 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
30684 MFI->setRestoreBasePointer(MF);
30686 unsigned FP = RI.getFrameRegister(*MF);
30687 unsigned BP = RI.getBaseRegister();
30688 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
30689 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
30690 MFI->getRestoreBasePointerOffset())
30691 .addRegMask(RI.getNoPreservedMask());
30693 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
30694 .addRegMask(RI.getNoPreservedMask());
30697 // IReg is used as an index in a memory operand and therefore can't be SP
30698 unsigned IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
30699 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
30700 Subtarget.is64Bit() ? 8 : 4);
30701 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
30703 .addImm(LPadList.size());
30704 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
30706 if (Subtarget.is64Bit()) {
30707 unsigned BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
30708 unsigned IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
30710 // leaq .LJTI0_0(%rip), BReg
30711 BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
30715 .addJumpTableIndex(MJTI)
30717 // movzx IReg64, IReg
30718 BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
30721 .addImm(X86::sub_32bit);
30724 case MachineJumpTableInfo::EK_BlockAddress:
30725 // jmpq *(BReg,IReg64,8)
30726 BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
30733 case MachineJumpTableInfo::EK_LabelDifference32: {
30734 unsigned OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
30735 unsigned OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
30736 unsigned TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
30738 // movl (BReg,IReg64,4), OReg
30739 BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
30745 // movsx OReg64, OReg
30746 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
30747 // addq BReg, OReg64, TReg
30748 BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
30752 BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
30756 llvm_unreachable("Unexpected jump table encoding");
30759 // jmpl *.LJTI0_0(,IReg,4)
30760 BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
30764 .addJumpTableIndex(MJTI)
30768 // Add the jump table entries as successors to the MBB.
30769 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
30770 for (auto &LP : LPadList)
30771 if (SeenMBBs.insert(LP).second)
30772 DispContBB->addSuccessor(LP);
30774 // N.B. the order the invoke BBs are processed in doesn't matter here.
30775 SmallVector<MachineBasicBlock *, 64> MBBLPads;
30776 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
30777 for (MachineBasicBlock *MBB : InvokeBBs) {
30778 // Remove the landing pad successor from the invoke block and replace it
30779 // with the new dispatch block.
30780 // Keep a copy of Successors since it's modified inside the loop.
30781 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
30783 // FIXME: Avoid quadratic complexity.
30784 for (auto MBBS : Successors) {
30785 if (MBBS->isEHPad()) {
30786 MBB->removeSuccessor(MBBS);
30787 MBBLPads.push_back(MBBS);
30791 MBB->addSuccessor(DispatchBB);
    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicitly defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
30797 for (auto &II : reverse(*MBB)) {
30801 DenseMap<unsigned, bool> DefRegs;
30802 for (auto &MOp : II.operands())
30804 DefRegs[MOp.getReg()] = true;
30806 MachineInstrBuilder MIB(*MF, &II);
30807 for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
30808 unsigned Reg = SavedRegs[RI];
30810 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
30817 // Mark all former landing pads as non-landing pads. The dispatch is the only
30818 // landing pad now.
30819 for (auto &LP : MBBLPads)
30820 LP->setIsEHPad(false);
30822 // The instruction is gone now.
30823 MI.eraseFromParent();
30827 MachineBasicBlock *
30828 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
30829 MachineBasicBlock *BB) const {
30830 MachineFunction *MF = BB->getParent();
30831 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30832 DebugLoc DL = MI.getDebugLoc();
30834 switch (MI.getOpcode()) {
30835 default: llvm_unreachable("Unexpected instr type to insert");
30836 case X86::TLS_addr32:
30837 case X86::TLS_addr64:
30838 case X86::TLS_base_addr32:
30839 case X86::TLS_base_addr64:
30840 return EmitLoweredTLSAddr(MI, BB);
30841 case X86::RETPOLINE_CALL32:
30842 case X86::RETPOLINE_CALL64:
30843 case X86::RETPOLINE_TCRETURN32:
30844 case X86::RETPOLINE_TCRETURN64:
30845 return EmitLoweredRetpoline(MI, BB);
30846 case X86::CATCHRET:
30847 return EmitLoweredCatchRet(MI, BB);
30848 case X86::CATCHPAD:
30849 return EmitLoweredCatchPad(MI, BB);
30850 case X86::SEG_ALLOCA_32:
30851 case X86::SEG_ALLOCA_64:
30852 return EmitLoweredSegAlloca(MI, BB);
30853 case X86::TLSCall_32:
30854 case X86::TLSCall_64:
30855 return EmitLoweredTLSCall(MI, BB);
30856 case X86::CMOV_FR32:
30857 case X86::CMOV_FR32X:
30858 case X86::CMOV_FR64:
30859 case X86::CMOV_FR64X:
30860 case X86::CMOV_GR8:
30861 case X86::CMOV_GR16:
30862 case X86::CMOV_GR32:
30863 case X86::CMOV_RFP32:
30864 case X86::CMOV_RFP64:
30865 case X86::CMOV_RFP80:
30866 case X86::CMOV_VR128:
30867 case X86::CMOV_VR128X:
30868 case X86::CMOV_VR256:
30869 case X86::CMOV_VR256X:
30870 case X86::CMOV_VR512:
30871 case X86::CMOV_VK2:
30872 case X86::CMOV_VK4:
30873 case X86::CMOV_VK8:
30874 case X86::CMOV_VK16:
30875 case X86::CMOV_VK32:
30876 case X86::CMOV_VK64:
30877 return EmitLoweredSelect(MI, BB);
30879 case X86::RDFLAGS32:
30880 case X86::RDFLAGS64: {
    unsigned PushF =
        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
30883 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
30884 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
30885 // Permit reads of the EFLAGS and DF registers without them being defined.
30886 // This intrinsic exists to read external processor state in flags, such as
30887 // the trap flag, interrupt flag, and direction flag, none of which are
30888 // modeled by the backend.
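    // Expansion sketch (64-bit flavour): pushfq ; popq %dst. The flags are
    // materialized through the stack because there is no direct EFLAGS-to-GPR
    // move.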
30889 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
30890 "Unexpected register in operand!");
30891 Push->getOperand(2).setIsUndef();
30892 assert(Push->getOperand(3).getReg() == X86::DF &&
30893 "Unexpected register in operand!");
30894 Push->getOperand(3).setIsUndef();
30895 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
30897 MI.eraseFromParent(); // The pseudo is gone now.
30901 case X86::WRFLAGS32:
30902 case X86::WRFLAGS64: {
    unsigned Push =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
    unsigned PopF =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
30907 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
30908 BuildMI(*BB, MI, DL, TII->get(PopF));
30910 MI.eraseFromParent(); // The pseudo is gone now.
30914 case X86::FP32_TO_INT16_IN_MEM:
30915 case X86::FP32_TO_INT32_IN_MEM:
30916 case X86::FP32_TO_INT64_IN_MEM:
30917 case X86::FP64_TO_INT16_IN_MEM:
30918 case X86::FP64_TO_INT32_IN_MEM:
30919 case X86::FP64_TO_INT64_IN_MEM:
30920 case X86::FP80_TO_INT16_IN_MEM:
30921 case X86::FP80_TO_INT32_IN_MEM:
30922 case X86::FP80_TO_INT64_IN_MEM: {
30923 // Change the floating point control register to use "round towards zero"
30924 // mode when truncating to an integer value.
30925 int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
30926 addFrameReference(BuildMI(*BB, MI, DL,
30927 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
30929 // Load the old value of the control word...
    unsigned OldCW =
        MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
30932 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
    // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
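    // For example (a sketch, assuming the common default control word 0x037F):
    // 0x037F | 0x0C00 = 0x0F7F, i.e. the rounding-control field RC becomes
    // 0b11 (round toward zero) while all other bits are left untouched.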
    unsigned NewCW =
        MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
30938 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
30939 .addReg(OldCW, RegState::Kill).addImm(0xC00);
30941 // Extract to 16 bits.
    unsigned NewCW16 =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
30944 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
30945 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
30947 // Prepare memory for FLDCW.
30948 int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
30949 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
30951 .addReg(NewCW16, RegState::Kill);
30953 // Reload the modified control word now...
30954 addFrameReference(BuildMI(*BB, MI, DL,
30955 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
30957 // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI.getOpcode()) {
30960 default: llvm_unreachable("illegal opcode!");
30961 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
30962 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
30963 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
30964 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
30965 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
30966 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
30967 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
30968 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
30969 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
30972 X86AddressMode AM = getAddressFromInstr(&MI, 0);
30973 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
30974 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
30976 // Reload the original control word now.
30977 addFrameReference(BuildMI(*BB, MI, DL,
30978 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
30980 MI.eraseFromParent(); // The pseudo instruction is gone now.
30986 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
30988 case X86::VASTART_SAVE_XMM_REGS:
30989 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
30991 case X86::VAARG_64:
30992 return EmitVAARG64WithCustomInserter(MI, BB);
30994 case X86::EH_SjLj_SetJmp32:
30995 case X86::EH_SjLj_SetJmp64:
30996 return emitEHSjLjSetJmp(MI, BB);
30998 case X86::EH_SjLj_LongJmp32:
30999 case X86::EH_SjLj_LongJmp64:
31000 return emitEHSjLjLongJmp(MI, BB);
31002 case X86::Int_eh_sjlj_setup_dispatch:
31003 return EmitSjLjDispatchBlock(MI, BB);
31005 case TargetOpcode::STATEPOINT:
31006 // As an implementation detail, STATEPOINT shares the STACKMAP format at
31007 // this point in the process. We diverge later.
31008 return emitPatchPoint(MI, BB);
31010 case TargetOpcode::STACKMAP:
31011 case TargetOpcode::PATCHPOINT:
31012 return emitPatchPoint(MI, BB);
31014 case TargetOpcode::PATCHABLE_EVENT_CALL:
31015 return emitXRayCustomEvent(MI, BB);
31017 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
31018 return emitXRayTypedEvent(MI, BB);
31020 case X86::LCMPXCHG8B: {
31021 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    // In addition to the four E[ABCD] registers implied by its encoding,
    // CMPXCHG8B requires a memory operand. If the current architecture is
    // i686 and the current function needs a base pointer (which is ESI for
    // i686), the register allocator cannot allocate registers for an address
    // of the form X(%reg, %reg, Y): there would never be enough unreserved
    // registers during regalloc (without the base pointer the only option
    // would be X(%edi, %esi, Y)). We help the register allocator by
    // precomputing the address in a new vreg using LEA.
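    // Illustrative sketch: an operand such as 16(%esi,%edi,1), where ESI is
    // the reserved base pointer, is rewritten as
    //   leal 16(%esi,%edi,1), %vreg
    //   cmpxchg8b (%vreg)
    // so that the CMPXCHG8B address needs only a single free register.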
    // If this is not i686 or there is no base pointer, there is nothing to do.
31033 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
    // Even though this code does not necessarily need the base pointer to
    // be ESI, we check for that. The reason: if this assert fails, something
    // has changed in the compiler's base pointer handling, and it most
    // probably has to be addressed here as well.
31040 assert(TRI->getBaseRegister() == X86::ESI &&
31041 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
31042 "base pointer in mind");
31044 MachineRegisterInfo &MRI = MF->getRegInfo();
31045 MVT SPTy = getPointerTy(MF->getDataLayout());
31046 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
31047 unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
31049 X86AddressMode AM = getAddressFromInstr(&MI, 0);
31050 // Regalloc does not need any help when the memory operand of CMPXCHG8B
    // does not use an index register.
31052 if (AM.IndexReg == X86::NoRegister)
31055 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
31056 // four operand definitions that are E[ABCD] registers. We skip them and
31057 // then insert the LEA.
31058 MachineBasicBlock::iterator MBBI(MI);
31059 while (MBBI->definesRegister(X86::EAX) || MBBI->definesRegister(X86::EBX) ||
31060 MBBI->definesRegister(X86::ECX) || MBBI->definesRegister(X86::EDX))
    addFullAddress(
        BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
31065 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
31069 case X86::LCMPXCHG16B:
31071 case X86::LCMPXCHG8B_SAVE_EBX:
31072 case X86::LCMPXCHG16B_SAVE_RBX: {
31074 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
31075 if (!BB->isLiveIn(BasePtr))
31076 BB->addLiveIn(BasePtr);
31082 //===----------------------------------------------------------------------===//
31083 // X86 Optimization Hooks
31084 //===----------------------------------------------------------------------===//
31087 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
31088 const APInt &Demanded,
31089 TargetLoweringOpt &TLO) const {
31090 // Only optimize Ands to prevent shrinking a constant that could be
31091 // matched by movzx.
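  // Worked example (a sketch, i32): for 'and x, 0x1FF' where only the low 8
  // bits are demanded, ShrunkMask is 0xFF, so the constant is replaced with
  // the zero-extend mask 0xFF and the AND can later be matched by movzbl.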
31092 if (Op.getOpcode() != ISD::AND)
31095 EVT VT = Op.getValueType();
31101 unsigned Size = VT.getSizeInBits();
31103 // Make sure the RHS really is a constant.
31104 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
31108 const APInt &Mask = C->getAPIntValue();
31110 // Clear all non-demanded bits initially.
31111 APInt ShrunkMask = Mask & Demanded;
31113 // Find the width of the shrunk mask.
31114 unsigned Width = ShrunkMask.getActiveBits();
31116 // If the mask is all 0s there's nothing to do here.
31120 // Find the next power of 2 width, rounding up to a byte.
31121 Width = PowerOf2Ceil(std::max(Width, 8U));
31122 // Truncate the width to size to handle illegal types.
31123 Width = std::min(Width, Size);
31125 // Calculate a possible zero extend mask for this constant.
31126 APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
31128 // If we aren't changing the mask, just return true to keep it and prevent
31129 // the caller from optimizing.
31130 if (ZeroExtendMask == Mask)
31133 // Make sure the new mask can be represented by a combination of mask bits
31134 // and non-demanded bits.
31135 if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
31138 // Replace the constant with the zero extend mask.
31140 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
31141 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
31142 return TLO.CombineTo(Op, NewOp);
31145 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
31147 const APInt &DemandedElts,
31148 const SelectionDAG &DAG,
31149 unsigned Depth) const {
31150 unsigned BitWidth = Known.getBitWidth();
31151 unsigned Opc = Op.getOpcode();
31152 EVT VT = Op.getValueType();
31153 assert((Opc >= ISD::BUILTIN_OP_END ||
31154 Opc == ISD::INTRINSIC_WO_CHAIN ||
31155 Opc == ISD::INTRINSIC_W_CHAIN ||
31156 Opc == ISD::INTRINSIC_VOID) &&
31157 "Should use MaskedValueIsZero if you don't know whether Op"
31158 " is a target node!");
31163 case X86ISD::SETCC:
31164 Known.Zero.setBitsFrom(1);
31166 case X86ISD::MOVMSK: {
31167 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
31168 Known.Zero.setBitsFrom(NumLoBits);
31171 case X86ISD::PEXTRB:
31172 case X86ISD::PEXTRW: {
31173 SDValue Src = Op.getOperand(0);
31174 EVT SrcVT = Src.getValueType();
31175 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
31176 Op.getConstantOperandVal(1));
31177 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
31178 Known = Known.zextOrTrunc(BitWidth, false);
31179 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
31182 case X86ISD::VSRAI:
31183 case X86ISD::VSHLI:
31184 case X86ISD::VSRLI: {
31185 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
31186 if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
31187 Known.setAllZero();
31191 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31192 unsigned ShAmt = ShiftImm->getZExtValue();
31193 if (Opc == X86ISD::VSHLI) {
31194 Known.Zero <<= ShAmt;
31195 Known.One <<= ShAmt;
31196 // Low bits are known zero.
31197 Known.Zero.setLowBits(ShAmt);
31198 } else if (Opc == X86ISD::VSRLI) {
31199 Known.Zero.lshrInPlace(ShAmt);
31200 Known.One.lshrInPlace(ShAmt);
31201 // High bits are known zero.
31202 Known.Zero.setHighBits(ShAmt);
31204 Known.Zero.ashrInPlace(ShAmt);
31205 Known.One.ashrInPlace(ShAmt);
31210 case X86ISD::PACKUS: {
31211 // PACKUS is just a truncation if the upper half is zero.
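    // For example (sketch): packuswb of two v8i16 inputs whose elements are
    // all known to be <= 255 never saturates, so it behaves exactly like a
    // truncation of the concatenated inputs to v16i8.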
31212 APInt DemandedLHS, DemandedRHS;
31213 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
31215 Known.One = APInt::getAllOnesValue(BitWidth * 2);
31216 Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
31219 if (!!DemandedLHS) {
31220 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31221 Known.One &= Known2.One;
31222 Known.Zero &= Known2.Zero;
31224 if (!!DemandedRHS) {
31225 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31226 Known.One &= Known2.One;
31227 Known.Zero &= Known2.Zero;
    if (Known.countMinLeadingZeros() < BitWidth)
      Known.resetAll();
31232 Known = Known.trunc(BitWidth);
31235 case X86ISD::ANDNP: {
31237 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31238 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31240 // ANDNP = (~X & Y);
31241 Known.One &= Known2.Zero;
31242 Known.Zero |= Known2.One;
31245 case X86ISD::FOR: {
31247 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31248 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31250 // Output known-0 bits are only known if clear in both the LHS & RHS.
31251 Known.Zero &= Known2.Zero;
    // Output known-1 bits are known to be set if set in either the LHS | RHS.
31253 Known.One |= Known2.One;
31256 case X86ISD::CMOV: {
31257 Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
31258 // If we don't know any bits, early out.
31259 if (Known.isUnknown())
31261 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
31263 // Only known if known in both the LHS and RHS.
31264 Known.One &= Known2.One;
31265 Known.Zero &= Known2.Zero;
31270 // Handle target shuffles.
31271 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
31272 if (isTargetShuffle(Opc)) {
31274 SmallVector<int, 64> Mask;
31275 SmallVector<SDValue, 2> Ops;
31276 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
31278 unsigned NumOps = Ops.size();
31279 unsigned NumElts = VT.getVectorNumElements();
31280 if (Mask.size() == NumElts) {
31281 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31282 Known.Zero.setAllBits(); Known.One.setAllBits();
31283 for (unsigned i = 0; i != NumElts; ++i) {
31284 if (!DemandedElts[i])
31287 if (M == SM_SentinelUndef) {
31288 // For UNDEF elements, we don't know anything about the common state
31289 // of the shuffle result.
31292 } else if (M == SM_SentinelZero) {
31293 Known.One.clearAllBits();
31296 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31297 "Shuffle index out of range");
31299 unsigned OpIdx = (unsigned)M / NumElts;
31300 unsigned EltIdx = (unsigned)M % NumElts;
31301 if (Ops[OpIdx].getValueType() != VT) {
31302 // TODO - handle target shuffle ops with different value types.
31306 DemandedOps[OpIdx].setBit(EltIdx);
31308 // Known bits are the values that are shared by every demanded element.
31309 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
31310 if (!DemandedOps[i])
31313 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
31314 Known.One &= Known2.One;
31315 Known.Zero &= Known2.Zero;
31322 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
31323 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
31324 unsigned Depth) const {
31325 EVT VT = Op.getValueType();
31326 unsigned VTBits = VT.getScalarSizeInBits();
31327 unsigned Opcode = Op.getOpcode();
31329 case X86ISD::SETCC_CARRY:
31330 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
31333 case X86ISD::VTRUNC: {
31334 // TODO: Add DemandedElts support.
31335 SDValue Src = Op.getOperand(0);
31336 unsigned NumSrcBits = Src.getScalarValueSizeInBits();
31337 assert(VTBits < NumSrcBits && "Illegal truncation input type");
31338 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
31339 if (Tmp > (NumSrcBits - VTBits))
31340 return Tmp - (NumSrcBits - VTBits);
31344 case X86ISD::PACKSS: {
31345 // PACKSS is just a truncation if the sign bits extend to the packed size.
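    // For example (sketch): packing v8i16 into v16i8, if every source element
    // has at least 11 sign bits then each packed i8 keeps 11 - 8 = 3 sign
    // bits, which is what the computation below returns.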
31346 APInt DemandedLHS, DemandedRHS;
31347 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
31350 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
31351 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
31353 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31355 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31356 unsigned Tmp = std::min(Tmp0, Tmp1);
31357 if (Tmp > (SrcBits - VTBits))
31358 return Tmp - (SrcBits - VTBits);
31362 case X86ISD::VSHLI: {
31363 SDValue Src = Op.getOperand(0);
31364 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
31365 if (ShiftVal.uge(VTBits))
31366 return VTBits; // Shifted all bits out --> zero.
31367 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
31368 if (ShiftVal.uge(Tmp))
31369 return 1; // Shifted all sign bits out --> unknown.
31370 return Tmp - ShiftVal.getZExtValue();
31373 case X86ISD::VSRAI: {
31374 SDValue Src = Op.getOperand(0);
31375 APInt ShiftVal = Op.getConstantOperandAPInt(1);
31376 if (ShiftVal.uge(VTBits - 1))
31377 return VTBits; // Sign splat.
31378 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
    ShiftVal += Tmp;
    return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
31383 case X86ISD::PCMPGT:
31384 case X86ISD::PCMPEQ:
31386 case X86ISD::VPCOM:
31387 case X86ISD::VPCOMU:
31388 // Vector compares return zero/all-bits result values.
31391 case X86ISD::ANDNP: {
31393 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
31394 if (Tmp0 == 1) return 1; // Early out.
31396 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
31397 return std::min(Tmp0, Tmp1);
31400 case X86ISD::CMOV: {
31401 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
31402 if (Tmp0 == 1) return 1; // Early out.
31403 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
31404 return std::min(Tmp0, Tmp1);
31408 // Handle target shuffles.
31409 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
31410 if (isTargetShuffle(Opcode)) {
31412 SmallVector<int, 64> Mask;
31413 SmallVector<SDValue, 2> Ops;
31414 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
31416 unsigned NumOps = Ops.size();
31417 unsigned NumElts = VT.getVectorNumElements();
31418 if (Mask.size() == NumElts) {
31419 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31420 for (unsigned i = 0; i != NumElts; ++i) {
31421 if (!DemandedElts[i])
31424 if (M == SM_SentinelUndef) {
31425 // For UNDEF elements, we don't know anything about the common state
31426 // of the shuffle result.
31428 } else if (M == SM_SentinelZero) {
31429 // Zero = all sign bits.
31432 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31433 "Shuffle index out of range");
31435 unsigned OpIdx = (unsigned)M / NumElts;
31436 unsigned EltIdx = (unsigned)M % NumElts;
31437 if (Ops[OpIdx].getValueType() != VT) {
31438 // TODO - handle target shuffle ops with different value types.
31441 DemandedOps[OpIdx].setBit(EltIdx);
31443 unsigned Tmp0 = VTBits;
31444 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
31445 if (!DemandedOps[i])
31448 DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
31449 Tmp0 = std::min(Tmp0, Tmp1);
31460 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
31461 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
31462 return N->getOperand(0);
// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
31468 // TODO: Investigate sharing more of this with shuffle lowering.
31469 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
31470 bool AllowFloatDomain, bool AllowIntDomain,
31471 SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
31472 const X86Subtarget &Subtarget, unsigned &Shuffle,
31473 MVT &SrcVT, MVT &DstVT) {
31474 unsigned NumMaskElts = Mask.size();
31475 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
31477 // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
31478 if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
31479 isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
31480 Shuffle = X86ISD::VZEXT_MOVL;
31481 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
  // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
31486 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
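  // For example (sketch): a v8i16 mask <0,Z,1,Z,2,Z,3,Z> (Z = zeroed lane)
  // is matched as ZERO_EXTEND_VECTOR_INREG from v8i16 to v4i32; with undef
  // instead of Z it is matched as the ANY_EXTEND form.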
31487 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
31488 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
31489 unsigned MaxScale = 64 / MaskEltSize;
31490 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
31491 bool MatchAny = true;
31492 bool MatchZero = true;
31493 unsigned NumDstElts = NumMaskElts / Scale;
31494 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
31495 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
31496 MatchAny = MatchZero = false;
31499 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
31500 MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
31502 if (MatchAny || MatchZero) {
31503 assert(MatchZero && "Failed to match zext but matched aext?");
31504 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
31505 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
31506 MVT::getIntegerVT(MaskEltSize);
31507 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
31509 if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
31510 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
31512 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
31513 if (SrcVT.getVectorNumElements() != NumDstElts)
31514 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
31516 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
31517 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
31523 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
31524 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
31525 isUndefOrEqual(Mask[0], 0) &&
31526 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
31527 Shuffle = X86ISD::VZEXT_MOVL;
31528 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
  // Check if we have SSE3 which will let us use MOVDDUP etc. These
  // instructions are no slower than UNPCKLPD but have the option to
  // fold the input operand into even an unaligned memory load.
31535 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
31536 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
31537 Shuffle = X86ISD::MOVDDUP;
31538 SrcVT = DstVT = MVT::v2f64;
31541 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31542 Shuffle = X86ISD::MOVSLDUP;
31543 SrcVT = DstVT = MVT::v4f32;
31546 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
31547 Shuffle = X86ISD::MOVSHDUP;
31548 SrcVT = DstVT = MVT::v4f32;
31553 if (MaskVT.is256BitVector() && AllowFloatDomain) {
31554 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
31555 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31556 Shuffle = X86ISD::MOVDDUP;
31557 SrcVT = DstVT = MVT::v4f64;
31560 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31561 Shuffle = X86ISD::MOVSLDUP;
31562 SrcVT = DstVT = MVT::v8f32;
31565 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
31566 Shuffle = X86ISD::MOVSHDUP;
31567 SrcVT = DstVT = MVT::v8f32;
31572 if (MaskVT.is512BitVector() && AllowFloatDomain) {
31573 assert(Subtarget.hasAVX512() &&
31574 "AVX512 required for 512-bit vector shuffles");
31575 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31576 Shuffle = X86ISD::MOVDDUP;
31577 SrcVT = DstVT = MVT::v8f64;
31580 if (isTargetShuffleEquivalent(
31581 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
31582 Shuffle = X86ISD::MOVSLDUP;
31583 SrcVT = DstVT = MVT::v16f32;
31586 if (isTargetShuffleEquivalent(
31587 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
31588 Shuffle = X86ISD::MOVSHDUP;
31589 SrcVT = DstVT = MVT::v16f32;
31597 // Attempt to match a combined shuffle mask against supported unary immediate
31598 // permute instructions.
31599 // TODO: Investigate sharing more of this with shuffle lowering.
31600 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
31601 const APInt &Zeroable,
31602 bool AllowFloatDomain, bool AllowIntDomain,
31603 const X86Subtarget &Subtarget,
31604 unsigned &Shuffle, MVT &ShuffleVT,
31605 unsigned &PermuteImm) {
31606 unsigned NumMaskElts = Mask.size();
31607 unsigned InputSizeInBits = MaskVT.getSizeInBits();
31608 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
31609 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
31611 bool ContainsZeros =
31612 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
31615 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
31616 // Check for lane crossing permutes.
31617 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
31618 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
31619 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
31620 Shuffle = X86ISD::VPERMI;
31621 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
31622 PermuteImm = getV4X86ShuffleImm(Mask);
31625 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
31626 SmallVector<int, 4> RepeatedMask;
31627 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
31628 Shuffle = X86ISD::VPERMI;
31629 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
31630 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
31634 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
31635 // VPERMILPD can permute with a non-repeating shuffle.
31636 Shuffle = X86ISD::VPERMILPI;
31637 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
31639 for (int i = 0, e = Mask.size(); i != e; ++i) {
31641 if (M == SM_SentinelUndef)
31643 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
31644 PermuteImm |= (M & 1) << i;
31650 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
  // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
31652 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
31653 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
31654 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
31655 SmallVector<int, 4> RepeatedMask;
31656 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
31657 // Narrow the repeated mask to create 32-bit element permutes.
31658 SmallVector<int, 4> WordMask = RepeatedMask;
31659 if (MaskScalarSizeInBits == 64)
31660 scaleShuffleMask<int>(2, RepeatedMask, WordMask);
31662 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
31663 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
31664 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
31665 PermuteImm = getV4X86ShuffleImm(WordMask);
31670 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
31671 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
31672 SmallVector<int, 4> RepeatedMask;
31673 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
31674 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
31675 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
31677 // PSHUFLW: permute lower 4 elements only.
31678 if (isUndefOrInRange(LoMask, 0, 4) &&
31679 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
31680 Shuffle = X86ISD::PSHUFLW;
31681 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
31682 PermuteImm = getV4X86ShuffleImm(LoMask);
31686 // PSHUFHW: permute upper 4 elements only.
31687 if (isUndefOrInRange(HiMask, 4, 8) &&
31688 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
31689 // Offset the HiMask so that we can create the shuffle immediate.
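      // For example (sketch): a repeated high mask {5,4,7,6} becomes {1,0,3,2}
      // after the offset, which encodes as the PSHUFHW immediate 0xB1.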
31690 int OffsetHiMask[4];
31691 for (int i = 0; i != 4; ++i)
31692 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
31694 Shuffle = X86ISD::PSHUFHW;
31695 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
31696 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
31702 // Attempt to match against byte/bit shifts.
31703 // FIXME: Add 512-bit support.
31704 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
31705 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
31706 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
31707 Mask, 0, Zeroable, Subtarget);
31708 if (0 < ShiftAmt) {
31709 PermuteImm = (unsigned)ShiftAmt;
31717 // Attempt to match a combined unary shuffle mask against supported binary
31718 // shuffle instructions.
31719 // TODO: Investigate sharing more of this with shuffle lowering.
31720 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
31721 bool AllowFloatDomain, bool AllowIntDomain,
31722 SDValue &V1, SDValue &V2, const SDLoc &DL,
31723 SelectionDAG &DAG, const X86Subtarget &Subtarget,
31724 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
31726 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
31728 if (MaskVT.is128BitVector()) {
31729 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
31731 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
31732 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
31733 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
31736 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
31738 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
31739 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
31742 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
31743 (AllowFloatDomain || !Subtarget.hasSSE41())) {
31745 Shuffle = X86ISD::MOVSD;
31746 SrcVT = DstVT = MVT::v2f64;
31749 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
31750 (AllowFloatDomain || !Subtarget.hasSSE41())) {
31751 Shuffle = X86ISD::MOVSS;
31752 SrcVT = DstVT = MVT::v4f32;
  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
31758 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
31759 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
31760 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
31761 if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
31768 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
31769 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
31770 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
31771 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
31772 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
31773 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
31774 if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
31776 SrcVT = DstVT = MaskVT;
31777 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
31778 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
31786 static bool matchBinaryPermuteShuffle(
31787 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
31788 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
31789 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
31790 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
31791 unsigned NumMaskElts = Mask.size();
31792 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
31794 // Attempt to match against PALIGNR byte rotate.
31795 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
31796 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
31797 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
31798 if (0 < ByteRotation) {
31799 Shuffle = X86ISD::PALIGNR;
31800 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
31801 PermuteImm = ByteRotation;
31806 // Attempt to combine to X86ISD::BLENDI.
31807 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
31808 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
31809 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
31810 uint64_t BlendMask = 0;
31811 bool ForceV1Zero = false, ForceV2Zero = false;
31812 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
31813 if (matchVectorShuffleAsBlend(V1, V2, TargetMask, ForceV1Zero, ForceV2Zero,
31815 if (MaskVT == MVT::v16i16) {
31816 // We can only use v16i16 PBLENDW if the lanes are repeated.
31817 SmallVector<int, 8> RepeatedMask;
31818 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
31820 assert(RepeatedMask.size() == 8 &&
31821 "Repeated mask size doesn't match!");
31823 for (int i = 0; i < 8; ++i)
31824 if (RepeatedMask[i] >= 8)
31825 PermuteImm |= 1 << i;
31826 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
31827 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
31828 Shuffle = X86ISD::BLENDI;
31829 ShuffleVT = MaskVT;
31833 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
31834 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
31835 PermuteImm = (unsigned)BlendMask;
31836 Shuffle = X86ISD::BLENDI;
31837 ShuffleVT = MaskVT;
31843 // Attempt to combine to INSERTPS.
31844 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
31845 MaskVT.is128BitVector()) {
31846 if (Zeroable.getBoolValue() &&
31847 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
31848 Shuffle = X86ISD::INSERTPS;
31849 ShuffleVT = MVT::v4f32;
31854 // Attempt to combine to SHUFPD.
31855 if (AllowFloatDomain && EltSizeInBits == 64 &&
31856 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
31857 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
31858 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
31859 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, PermuteImm, Mask)) {
31860 Shuffle = X86ISD::SHUFP;
31861 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
31866 // Attempt to combine to SHUFPS.
31867 if (AllowFloatDomain && EltSizeInBits == 32 &&
31868 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
31869 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
31870 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
31871 SmallVector<int, 4> RepeatedMask;
31872 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
      // Match each half of the repeated mask to determine whether it's just
      // referencing one of the vectors, is zeroable, or is entirely undef.
31875 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
31876 int M0 = RepeatedMask[Offset];
31877 int M1 = RepeatedMask[Offset + 1];
31879 if (isUndefInRange(RepeatedMask, Offset, 2)) {
31880 return DAG.getUNDEF(MaskVT);
31881 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
31882 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
31883 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
31884 return getZeroVector(MaskVT, Subtarget, DAG, DL);
31885 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
31886 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
31887 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
31889 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
31890 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
31891 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
31898 int ShufMask[4] = {-1, -1, -1, -1};
31899 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
31900 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
31905 Shuffle = X86ISD::SHUFP;
31906 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
31907 PermuteImm = getV4X86ShuffleImm(ShufMask);
31916 static SDValue combineX86ShuffleChainWithExtract(
31917 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
31918 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
31919 const X86Subtarget &Subtarget);
/// Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
31924 /// This is the leaf of the recursive combine below. When we have found some
31925 /// chain of single-use x86 shuffle instructions and accumulated the combined
31926 /// shuffle mask represented by them, this will try to pattern match that mask
31927 /// into either a single instruction if there is a special purpose instruction
31928 /// for this operation, or into a PSHUFB instruction which is a fully general
31929 /// instruction but should only be used to replace chains over a certain depth.
31930 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
31931 ArrayRef<int> BaseMask, int Depth,
31932 bool HasVariableMask,
31933 bool AllowVariableMask, SelectionDAG &DAG,
31934 const X86Subtarget &Subtarget) {
31935 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
31936 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
31937 "Unexpected number of shuffle inputs!");
31939 // Find the inputs that enter the chain. Note that multiple uses are OK
  // here; we're not going to remove the operands we find.
31941 bool UnaryShuffle = (Inputs.size() == 1);
31942 SDValue V1 = peekThroughBitcasts(Inputs[0]);
31943 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
31944 : peekThroughBitcasts(Inputs[1]));
31946 MVT VT1 = V1.getSimpleValueType();
31947 MVT VT2 = V2.getSimpleValueType();
31948 MVT RootVT = Root.getSimpleValueType();
31949 assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
31950 VT2.getSizeInBits() == RootVT.getSizeInBits() &&
31951 "Vector size mismatch");
31956 unsigned NumBaseMaskElts = BaseMask.size();
31957 if (NumBaseMaskElts == 1) {
31958 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
31959 return DAG.getBitcast(RootVT, V1);
31962 unsigned RootSizeInBits = RootVT.getSizeInBits();
31963 unsigned NumRootElts = RootVT.getVectorNumElements();
31964 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
31965 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
31966 (RootVT.isFloatingPoint() && Depth >= 2) ||
31967 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
  // Don't combine if we are an AVX512/EVEX target and the mask element size
31970 // is different from the root element size - this would prevent writemasks
31971 // from being reused.
31972 // TODO - this currently prevents all lane shuffles from occurring.
31973 // TODO - check for writemasks usage instead of always preventing combining.
31974 // TODO - attempt to narrow Mask back to writemask size.
31975 bool IsEVEXShuffle =
31976 RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
31978 // Attempt to match a subvector broadcast.
31979 // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
31980 if (UnaryShuffle &&
31981 (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
31982 SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
31983 if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
31984 SDValue Src = Inputs[0];
31985 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
31986 Src.getOperand(0).isUndef() &&
31987 Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
31988 MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
31989 return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
31990 Src.getValueType(),
31991 Src.getOperand(1)));
31996 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
31998 // Handle 128-bit lane shuffles of 256-bit vectors.
31999 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
32000 // we need to use the zeroing feature.
32001 // TODO - this should support binary shuffles.
32002 if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
32003 !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
32004 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
32005 if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
32006 return SDValue(); // Nothing to do!
32007 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
32008 unsigned PermMask = 0;
32009 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
32010 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
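// Illustrative encodings: BaseMask <1,0> yields PermMask 0x01 (swap the two
// 128-bit lanes of V1), while a zero/undef sentinel in either slot sets the
// 0x8 bit for that lane so it is zeroed.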
32012 Res = DAG.getBitcast(ShuffleVT, V1);
32013 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
32014 DAG.getUNDEF(ShuffleVT),
32015 DAG.getConstant(PermMask, DL, MVT::i8));
32016 return DAG.getBitcast(RootVT, Res);
32019 // For masks that have been widened to 128-bit elements or more,
32020 // narrow back down to 64-bit elements.
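// e.g. a 2 x 128-bit mask of <1,0> rescales to the 4 x 64-bit mask <2,3,0,1>.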
32021 SmallVector<int, 64> Mask;
32022 if (BaseMaskEltSizeInBits > 64) {
32023 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
32024 int MaskScale = BaseMaskEltSizeInBits / 64;
32025 scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
32026 } else {
32027 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
32028 }
32030 unsigned NumMaskElts = Mask.size();
32031 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
32033 // Determine the effective mask value type.
32034 FloatDomain &= (32 <= MaskEltSizeInBits);
32035 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
32036 : MVT::getIntegerVT(MaskEltSizeInBits);
32037 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
32039 // Only allow legal mask types.
32040 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
32041 return SDValue();
32043 // Attempt to match the mask against known shuffle patterns.
32044 MVT ShuffleSrcVT, ShuffleVT;
32045 unsigned Shuffle, PermuteImm;
32047 // Which shuffle domains are permitted?
32048 // Permit domain crossing at higher combine depths.
32049 // TODO: Should we indicate which domain is preferred if both are allowed?
32050 bool AllowFloatDomain = FloatDomain || (Depth > 3);
32051 bool AllowIntDomain = (!FloatDomain || (Depth > 3)) && Subtarget.hasSSE2() &&
32052 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
32054 // Determine zeroable mask elements.
32055 APInt Zeroable(NumMaskElts, 0);
32056 for (unsigned i = 0; i != NumMaskElts; ++i)
32057 if (isUndefOrZero(Mask[i]))
32058 Zeroable.setBit(i);
32060 if (UnaryShuffle) {
32061 // If we are shuffling a X86ISD::VZEXT_LOAD then we can use the load
32062 // directly if we don't shuffle the lower element and we shuffle the upper
32063 // (zero) elements within themselves.
32064 if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
32065 (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
32066 MaskEltSizeInBits) == 0) {
32067 unsigned Scale =
32068 cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
32069 MaskEltSizeInBits;
32070 ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
32071 if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
32072 isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
32073 return DAG.getBitcast(RootVT, V1);
32077 // Attempt to match against broadcast-from-vector.
32078 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
32079 if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
32080 && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
32081 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
32082 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
32083 if (V1.getValueType() == MaskVT &&
32084 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
32085 MayFoldLoad(V1.getOperand(0))) {
32086 if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
32087 return SDValue(); // Nothing to do!
32088 Res = V1.getOperand(0);
32089 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
32090 return DAG.getBitcast(RootVT, Res);
32092 if (Subtarget.hasAVX2()) {
32093 if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
32094 return SDValue(); // Nothing to do!
32095 Res = DAG.getBitcast(MaskVT, V1);
32096 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
32097 return DAG.getBitcast(RootVT, Res);
32102 SDValue NewV1 = V1; // Save operand in case early exit happens.
32103 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32104 DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32105 ShuffleVT) &&
32106 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32107 if (Depth == 1 && Root.getOpcode() == Shuffle)
32108 return SDValue(); // Nothing to do!
32109 Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
32110 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
32111 return DAG.getBitcast(RootVT, Res);
32114 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
32115 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
32116 PermuteImm) &&
32117 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32118 if (Depth == 1 && Root.getOpcode() == Shuffle)
32119 return SDValue(); // Nothing to do!
32120 Res = DAG.getBitcast(ShuffleVT, V1);
32121 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
32122 DAG.getConstant(PermuteImm, DL, MVT::i8));
32123 return DAG.getBitcast(RootVT, Res);
32127 SDValue NewV1 = V1; // Save operands in case early exit happens.
32128 SDValue NewV2 = V2;
32129 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32130 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32131 ShuffleVT, UnaryShuffle) &&
32132 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32133 if (Depth == 1 && Root.getOpcode() == Shuffle)
32134 return SDValue(); // Nothing to do!
32135 NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
32136 NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
32137 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
32138 return DAG.getBitcast(RootVT, Res);
32141 NewV1 = V1; // Save operands in case early exit happens.
32142 NewV2 = V2;
32143 if (matchBinaryPermuteShuffle(
32144 MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
32145 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
32146 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32147 if (Depth == 1 && Root.getOpcode() == Shuffle)
32148 return SDValue(); // Nothing to do!
32149 NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
32150 NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
32151 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
32152 DAG.getConstant(PermuteImm, DL, MVT::i8));
32153 return DAG.getBitcast(RootVT, Res);
32156 // Typically from here on, we need an integer version of MaskVT.
32157 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
32158 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
32160 // Annoyingly, SSE4A instructions don't map into the above match helpers.
32161 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
32162 uint64_t BitLen, BitIdx;
32163 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
32164 Zeroable)) {
32165 if (Depth == 1 && Root.getOpcode() == X86ISD::EXTRQI)
32166 return SDValue(); // Nothing to do!
32167 V1 = DAG.getBitcast(IntMaskVT, V1);
32168 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
32169 DAG.getConstant(BitLen, DL, MVT::i8),
32170 DAG.getConstant(BitIdx, DL, MVT::i8));
32171 return DAG.getBitcast(RootVT, Res);
32174 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
32175 if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTQI)
32176 return SDValue(); // Nothing to do!
32177 V1 = DAG.getBitcast(IntMaskVT, V1);
32178 V2 = DAG.getBitcast(IntMaskVT, V2);
32179 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
32180 DAG.getConstant(BitLen, DL, MVT::i8),
32181 DAG.getConstant(BitIdx, DL, MVT::i8));
32182 return DAG.getBitcast(RootVT, Res);
32186 // Don't try to re-form single instruction chains under any circumstances now
32187 // that we've done encoding canonicalization for them.
32188 if (Depth < 2)
32189 return SDValue();
32191 // Depth threshold above which we can efficiently use variable mask shuffles.
32192 int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 2 : 3;
32193 AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
32195 bool MaskContainsZeros =
32196 any_of(Mask, [](int M) { return M == SM_SentinelZero; });
32198 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
32199 // If we have a single input lane-crossing shuffle then lower to VPERMV.
32200 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32201 ((Subtarget.hasAVX2() &&
32202 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32203 (Subtarget.hasAVX512() &&
32204 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32205 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32206 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32207 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32208 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32209 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32210 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32211 Res = DAG.getBitcast(MaskVT, V1);
32212 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
32213 return DAG.getBitcast(RootVT, Res);
32216 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
32217 // vector as the second source.
32218 if (UnaryShuffle && AllowVariableMask &&
32219 ((Subtarget.hasAVX512() &&
32220 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32221 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32222 (Subtarget.hasVLX() &&
32223 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32224 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32225 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32226 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32227 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32228 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32229 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
32230 for (unsigned i = 0; i != NumMaskElts; ++i)
32231 if (Mask[i] == SM_SentinelZero)
32232 Mask[i] = NumMaskElts + i;
32234 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32235 Res = DAG.getBitcast(MaskVT, V1);
32236 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
32237 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
32238 return DAG.getBitcast(RootVT, Res);
32241 // If that failed and either input is extracted then try to combine as a
32242 // shuffle with the larger type.
32243 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32244 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32245 DAG, Subtarget))
32246 return WideShuffle;
32248 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
32249 if (AllowVariableMask && !MaskContainsZeros &&
32250 ((Subtarget.hasAVX512() &&
32251 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32252 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32253 (Subtarget.hasVLX() &&
32254 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32255 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32256 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32257 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32258 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32259 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32260 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32261 V1 = DAG.getBitcast(MaskVT, V1);
32262 V2 = DAG.getBitcast(MaskVT, V2);
32263 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32264 return DAG.getBitcast(RootVT, Res);
32269 // See if we can combine a single input shuffle with zeros to a bit-mask,
32270 // which is much simpler than any shuffle.
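// e.g. a v4i32 mask of <0,Z,2,Z> (Z = zero) can become an AND with the
// constant <-1,0,-1,0> instead of any shuffle (illustrative).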
32271 if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
32272 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
32273 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
32274 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
32275 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
32276 APInt UndefElts(NumMaskElts, 0);
32277 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
32278 for (unsigned i = 0; i != NumMaskElts; ++i) {
32279 int M = Mask[i];
32280 if (M == SM_SentinelUndef) {
32281 UndefElts.setBit(i);
32282 continue;
32283 }
32284 if (M == SM_SentinelZero)
32285 continue;
32286 EltBits[i] = AllOnes;
32287 }
32288 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
32289 Res = DAG.getBitcast(MaskVT, V1);
32290 unsigned AndOpcode =
32291 FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
32292 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
32293 return DAG.getBitcast(RootVT, Res);
32296 // If we have a single input shuffle with different shuffle patterns in the
32297 // 128-bit lanes, lower to VPERMILPS with a variable mask.
32298 // TODO: Combine other mask types at higher depths.
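// e.g. the v8f32 mask <3,2,1,0,4,5,6,7> reverses one lane but not the other,
// so it is lowered here with the per-lane index vector <3,2,1,0,0,1,2,3>
// (each entry is M % 4); purely illustrative.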
32299 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32300 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
32301 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
32302 SmallVector<SDValue, 16> VPermIdx;
32303 for (int M : Mask) {
32304 SDValue Idx =
32305 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
32306 VPermIdx.push_back(Idx);
32308 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
32309 Res = DAG.getBitcast(MaskVT, V1);
32310 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
32311 return DAG.getBitcast(RootVT, Res);
32314 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
32315 // to VPERMIL2PD/VPERMIL2PS.
32316 if (AllowVariableMask && Subtarget.hasXOP() &&
32317 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
32318 MaskVT == MVT::v8f32)) {
32319 // VPERMIL2 Operation.
32320 // Bits[3] - Match Bit.
32321 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
32322 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
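// Illustrative PS encoding: a per-element selector of 5 (0b101) reads element
// 1 of the second source within the same 128-bit lane, and the value 8 pushed
// below (match bit set) is used for zeroed elements.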
32323 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
32324 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
32325 SmallVector<int, 8> VPerm2Idx;
32326 unsigned M2ZImm = 0;
32327 for (int M : Mask) {
32328 if (M == SM_SentinelUndef) {
32329 VPerm2Idx.push_back(-1);
32330 continue;
32331 }
32332 if (M == SM_SentinelZero) {
32334 VPerm2Idx.push_back(8);
32335 continue;
32336 }
32337 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
32338 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
32339 VPerm2Idx.push_back(Index);
32341 V1 = DAG.getBitcast(MaskVT, V1);
32342 V2 = DAG.getBitcast(MaskVT, V2);
32343 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
32344 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
32345 DAG.getConstant(M2ZImm, DL, MVT::i8));
32346 return DAG.getBitcast(RootVT, Res);
32349 // If we have 3 or more shuffle instructions or a chain involving a variable
32350 // mask, we can replace them with a single PSHUFB instruction profitably.
32351 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
32352 // instructions, but in practice PSHUFB tends to be *very* fast so we're
32353 // more aggressive.
32354 if (UnaryShuffle && AllowVariableMask &&
32355 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
32356 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
32357 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
32358 SmallVector<SDValue, 16> PSHUFBMask;
32359 int NumBytes = RootVT.getSizeInBits() / 8;
32360 int Ratio = NumBytes / NumMaskElts;
32361 for (int i = 0; i < NumBytes; ++i) {
32362 int M = Mask[i / Ratio];
32363 if (M == SM_SentinelUndef) {
32364 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
32365 continue;
32366 }
32367 if (M == SM_SentinelZero) {
32368 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
32369 continue;
32370 }
32371 M = Ratio * M + i % Ratio;
32372 assert((M / 16) == (i / 16) && "Lane crossing detected");
32373 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32375 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
32376 Res = DAG.getBitcast(ByteVT, V1);
32377 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
32378 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
32379 return DAG.getBitcast(RootVT, Res);
32382 // With XOP, if we have a 128-bit binary input shuffle we can always combine
32383 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
32384 // slower than PSHUFB on targets that support both.
32385 if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
32386 // VPPERM Mask Operation
32387 // Bits[4:0] - Byte Index (0 - 31)
32388 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
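// Illustrative encodings: selector 0x14 reads byte 4 of the second source
// (bytes 16-31 come from V2), while 0x80 (permute op 4) produces a zero byte.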
32389 SmallVector<SDValue, 16> VPPERMMask;
32390 int NumBytes = RootVT.getSizeInBits() / 8;
32391 int Ratio = NumBytes / NumMaskElts;
32392 for (int i = 0; i < NumBytes; ++i) {
32393 int M = Mask[i / Ratio];
32394 if (M == SM_SentinelUndef) {
32395 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
32396 continue;
32397 }
32398 if (M == SM_SentinelZero) {
32399 VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
32400 continue;
32401 }
32402 M = Ratio * M + i % Ratio;
32403 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32405 MVT ByteVT = MVT::v16i8;
32406 V1 = DAG.getBitcast(ByteVT, V1);
32407 V2 = DAG.getBitcast(ByteVT, V2);
32408 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
32409 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
32410 return DAG.getBitcast(RootVT, Res);
32413 // If that failed and either input is extracted then try to combine as a
32414 // shuffle with the larger type.
32415 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32416 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32417 DAG, Subtarget))
32418 return WideShuffle;
32420 // If we have a dual input shuffle then lower to VPERMV3.
32421 if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32422 ((Subtarget.hasAVX512() &&
32423 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32424 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32425 (Subtarget.hasVLX() &&
32426 (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
32427 MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
32428 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32429 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32430 (Subtarget.hasBWI() && Subtarget.hasVLX() &&
32431 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
32432 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32433 (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
32434 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
32435 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32436 V1 = DAG.getBitcast(MaskVT, V1);
32437 V2 = DAG.getBitcast(MaskVT, V2);
32438 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32439 return DAG.getBitcast(RootVT, Res);
32442 // Failed to find any combines.
32443 return SDValue();
32444 }
32446 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
32447 // instruction if possible.
32449 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
32450 // type size to attempt to combine:
32451 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
32452 // -->
32453 // extract_subvector(shuffle(x,y,m2),0)
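// For example (illustrative only): a shuffle of two v2f64 subvectors taken
// from the upper halves of wider v4f64 vectors can instead shuffle the full
// v4f64 inputs with an adjusted, widened mask and then extract the low 128
// bits of the result.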
32454 static SDValue combineX86ShuffleChainWithExtract(
32455 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
32456 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32457 const X86Subtarget &Subtarget) {
32458 unsigned NumMaskElts = BaseMask.size();
32459 unsigned NumInputs = Inputs.size();
32460 if (NumInputs == 0)
32463 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
32464 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
32466 // Peek through subvectors.
32467 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
32468 unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
32469 for (unsigned i = 0; i != NumInputs; ++i) {
32470 SDValue &Src = WideInputs[i];
32471 unsigned &Offset = Offsets[i];
32472 Src = peekThroughBitcasts(Src);
32473 EVT BaseVT = Src.getValueType();
32474 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
32475 isa<ConstantSDNode>(Src.getOperand(1))) {
32476 Offset += Src.getConstantOperandVal(1);
32477 Src = Src.getOperand(0);
32479 WideSizeInBits = std::max(WideSizeInBits, Src.getValueSizeInBits());
32480 assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
32481 "Unexpected subvector extraction");
32482 Offset /= BaseVT.getVectorNumElements();
32483 Offset *= NumMaskElts;
32486 // Bail if we're always extracting from the lowest subvectors;
32487 // combineX86ShuffleChain should match this for the current width.
32488 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
32489 return SDValue();
32491 EVT RootVT = Root.getValueType();
32492 unsigned RootSizeInBits = RootVT.getSizeInBits();
32493 unsigned Scale = WideSizeInBits / RootSizeInBits;
32494 assert((WideSizeInBits % RootSizeInBits) == 0 &&
32495 "Unexpected subvector extraction");
32497 // If the src vector types aren't the same, see if we can extend
32498 // them to match each other.
32499 // TODO: Support different scalar types?
32500 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
32501 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
32502 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
32503 Op.getValueType().getScalarType() != WideSVT;
32504 }))
32505 return SDValue();
32507 for (SDValue &NewInput : WideInputs) {
32508 assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
32509 "Shuffle vector size mismatch");
32510 if (WideSizeInBits > NewInput.getValueSizeInBits())
32511 NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
32512 SDLoc(NewInput), WideSizeInBits);
32513 assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
32514 "Unexpected subvector extraction");
32517 // Create new mask for larger type.
32518 for (unsigned i = 1; i != NumInputs; ++i)
32519 Offsets[i] += i * Scale * NumMaskElts;
32521 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
32522 for (int &M : WideMask) {
32523 if (M < 0)
32524 continue;
32525 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
32526 }
32527 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
32529 // Remove unused/repeated shuffle source ops.
32530 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
32531 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
32533 if (WideInputs.size() > 2)
32536 // Increase depth for every upper subvector we've peeked through.
32537 Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
32539 // Attempt to combine wider chain.
32540 // TODO: Can we use a better Root?
32541 SDValue WideRoot = WideInputs[0];
32542 if (SDValue WideShuffle = combineX86ShuffleChain(
32543 WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
32544 AllowVariableMask, DAG, Subtarget)) {
32545 WideShuffle =
32546 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
32547 return DAG.getBitcast(RootVT, WideShuffle);
32548 }
32550 return SDValue();
32551 }
32552 // Attempt to constant fold all of the constant source ops.
32553 // Returns true if the entire shuffle is folded to a constant.
32554 // TODO: Extend this to merge multiple constant Ops and update the mask.
32555 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
32556 ArrayRef<int> Mask, SDValue Root,
32557 bool HasVariableMask,
32558 SelectionDAG &DAG,
32559 const X86Subtarget &Subtarget) {
32560 MVT VT = Root.getSimpleValueType();
32562 unsigned SizeInBits = VT.getSizeInBits();
32563 unsigned NumMaskElts = Mask.size();
32564 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
32565 unsigned NumOps = Ops.size();
32567 // Extract constant bits from each source op.
32568 bool OneUseConstantOp = false;
32569 SmallVector<APInt, 16> UndefEltsOps(NumOps);
32570 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
32571 for (unsigned i = 0; i != NumOps; ++i) {
32572 SDValue SrcOp = Ops[i];
32573 OneUseConstantOp |= SrcOp.hasOneUse();
32574 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
32579 // Only fold if at least one of the constants is only used once or
32580 // the combined shuffle has included a variable mask shuffle, this
32581 // is to avoid constant pool bloat.
32582 if (!OneUseConstantOp && !HasVariableMask)
32585 // Shuffle the constant bits according to the mask.
32586 APInt UndefElts(NumMaskElts, 0);
32587 APInt ZeroElts(NumMaskElts, 0);
32588 APInt ConstantElts(NumMaskElts, 0);
32589 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
32590 APInt::getNullValue(MaskSizeInBits));
32591 for (unsigned i = 0; i != NumMaskElts; ++i) {
32592 int M = Mask[i];
32593 if (M == SM_SentinelUndef) {
32594 UndefElts.setBit(i);
32595 continue;
32596 } else if (M == SM_SentinelZero) {
32597 ZeroElts.setBit(i);
32598 continue;
32599 }
32600 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
32602 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
32603 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
32605 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
32606 if (SrcUndefElts[SrcMaskIdx]) {
32607 UndefElts.setBit(i);
32611 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
32612 APInt &Bits = SrcEltBits[SrcMaskIdx];
32614 ZeroElts.setBit(i);
32618 ConstantElts.setBit(i);
32619 ConstantBitData[i] = Bits;
32621 assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
32623 // Create the constant data.
32624 MVT MaskSVT;
32625 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
32626 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
32627 else
32628 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
32630 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
32632 SDLoc DL(Root);
32633 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
32634 return DAG.getBitcast(VT, CstOp);
32637 /// Fully generic combining of x86 shuffle instructions.
32639 /// This should be the last combine run over the x86 shuffle instructions. Once
32640 /// they have been fully optimized, this will recursively consider all chains
32641 /// of single-use shuffle instructions, build a generic model of the cumulative
32642 /// shuffle operation, and check for simpler instructions which implement this
32643 /// operation. We use this primarily for two purposes:
32645 /// 1) Collapse generic shuffles to specialized single instructions when
32646 /// equivalent. In most cases, this is just an encoding size win, but
32647 /// sometimes we will collapse multiple generic shuffles into a single
32648 /// special-purpose shuffle.
32649 /// 2) Look for sequences of shuffle instructions with 3 or more total
32650 /// instructions, and replace them with the slightly more expensive SSSE3
32651 /// PSHUFB instruction if available. We do this as the last combining step
32652 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
32653 /// a suitable short sequence of other instructions. The PSHUFB will either
32654 /// use a register or have to read from memory and so is slightly (but only
32655 /// slightly) more expensive than the other shuffle instructions.
32657 /// Because this is inherently a quadratic operation (for each shuffle in
32658 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
32659 /// This should never be an issue in practice as the shuffle lowering doesn't
32660 /// produce sequences of more than 8 instructions.
32662 /// FIXME: We will currently miss some cases where the redundant shuffling
32663 /// would simplify under the threshold for PSHUFB formation because of
32664 /// combine-ordering. To fix this, we should do the redundant instruction
32665 /// combining in this recursive walk.
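/// As a small illustrative example: pshufd(pshufd(x, 0xB1), 0xB1) accumulates
/// to the identity mask and is removed outright, while a three-deep chain of
/// word/byte shuffles may instead be replaced by one PSHUFB as described above.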
32666 static SDValue combineX86ShufflesRecursively(
32667 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
32668 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
32669 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32670 const X86Subtarget &Subtarget) {
32671 // Bound the depth of our recursive combine because this is ultimately
32672 // quadratic in nature.
32673 const unsigned MaxRecursionDepth = 8;
32674 if (Depth > MaxRecursionDepth)
32675 return SDValue();
32677 // Directly rip through bitcasts to find the underlying operand.
32678 SDValue Op = SrcOps[SrcOpIndex];
32679 Op = peekThroughOneUseBitcasts(Op);
32681 MVT VT = Op.getSimpleValueType();
32682 if (!VT.isVector())
32683 return SDValue(); // Bail if we hit a non-vector.
32685 assert(Root.getSimpleValueType().isVector() &&
32686 "Shuffles operate on vector types!");
32687 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
32688 "Can only combine shuffles of the same vector register size.");
32690 // Extract target shuffle mask and resolve sentinels and inputs.
32691 SmallVector<int, 64> OpMask;
32692 SmallVector<SDValue, 2> OpInputs;
32693 if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, DAG))
32694 return SDValue();
32696 // Add the inputs to the Ops list, avoiding duplicates.
32697 SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
32699 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
32700 // Attempt to find an existing match.
32701 SDValue InputBC = peekThroughBitcasts(Input);
32702 for (int i = 0, e = Ops.size(); i < e; ++i)
32703 if (InputBC == peekThroughBitcasts(Ops[i]))
32704 return i;
32705 // Match failed - should we replace an existing Op?
32706 if (InsertionPoint >= 0) {
32707 Ops[InsertionPoint] = Input;
32708 return InsertionPoint;
32710 // Add to the end of the Ops list.
32711 Ops.push_back(Input);
32712 return Ops.size() - 1;
32715 SmallVector<int, 2> OpInputIdx;
32716 for (SDValue OpInput : OpInputs)
32717 OpInputIdx.push_back(AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
32719 assert(((RootMask.size() > OpMask.size() &&
32720 RootMask.size() % OpMask.size() == 0) ||
32721 (OpMask.size() > RootMask.size() &&
32722 OpMask.size() % RootMask.size() == 0) ||
32723 OpMask.size() == RootMask.size()) &&
32724 "The smaller number of elements must divide the larger.");
32726 // This function can be performance-critical, so we rely on the power-of-2
32727 // knowledge that we have about the mask sizes to replace div/rem ops with
32728 // bit-masks and shifts.
32729 assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
32730 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
32731 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
32732 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
32734 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
32735 unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
32736 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
32737 assert((RootRatio == 1 || OpRatio == 1) &&
32738 "Must not have a ratio for both incoming and op masks!");
32740 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
32741 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
32742 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
32743 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
32744 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
32746 SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);
32748 // Merge this shuffle operation's mask into our accumulated mask. Note that
32749 // this shuffle's mask will be the first applied to the input, followed by the
32750 // root mask to get us all the way to the root value arrangement. The reason
32751 // for this order is that we are recursing up the operation chain.
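// Illustrative example (equal widths, single input): if the root mask is
// <2,3,0,1> and this op's mask is <1,0,3,2>, the merged entry for lane i is
// OpMask[RootMask[i]], giving <3,2,1,0>.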
32752 for (unsigned i = 0; i < MaskWidth; ++i) {
32753 unsigned RootIdx = i >> RootRatioLog2;
32754 if (RootMask[RootIdx] < 0) {
32755 // This is a zero or undef lane, we're done.
32756 Mask[i] = RootMask[RootIdx];
32757 continue;
32758 }
32760 unsigned RootMaskedIdx =
32761 RootRatio == 1
32762 ? RootMask[RootIdx]
32763 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
32765 // Just insert the scaled root mask value if it references an input other
32766 // than the SrcOp we're currently inserting.
32767 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
32768 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
32769 Mask[i] = RootMaskedIdx;
32770 continue;
32771 }
32773 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
32774 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
32775 if (OpMask[OpIdx] < 0) {
32776 // The incoming lanes are zero or undef, it doesn't matter which ones we
32777 // use.
32778 Mask[i] = OpMask[OpIdx];
32779 continue;
32780 }
32782 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
32783 unsigned OpMaskedIdx =
32784 OpRatio == 1
32785 ? OpMask[OpIdx]
32786 : (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));
32788 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
32789 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
32790 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
32791 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
32793 Mask[i] = OpMaskedIdx;
32796 // Handle the all undef/zero cases early.
32797 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
32798 return DAG.getUNDEF(Root.getValueType());
32800 // TODO - should we handle the mixed zero/undef case as well? Just returning
32801 // a zero mask will lose information on undef elements possibly reducing
32802 // future combine possibilities.
32803 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
32804 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
32805 SDLoc(Root));
32807 // Remove unused/repeated shuffle source ops.
32808 resolveTargetShuffleInputsAndMask(Ops, Mask);
32809 assert(!Ops.empty() && "Shuffle with no inputs detected");
32811 HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
32813 // Update the list of shuffle nodes that have been combined so far.
32814 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
32816 CombinedNodes.push_back(Op.getNode());
32818 // See if we can recurse into each shuffle source op (if it's a target
32819 // shuffle). The source op should only be generally combined if it either has
32820 // a single use (i.e. current Op) or all its users have already been combined;
32821 // if not, we can still combine but should prevent generation of variable
32822 // shuffles to avoid constant pool bloat.
32823 // Don't recurse if we already have more source ops than we can combine in
32824 // the remaining recursion depth.
32825 if (Ops.size() < (MaxRecursionDepth - Depth)) {
32826 for (int i = 0, e = Ops.size(); i < e; ++i) {
32827 bool AllowVar = false;
32828 if (Ops[i].getNode()->hasOneUse() ||
32829 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
32830 AllowVar = AllowVariableMask;
32831 if (SDValue Res = combineX86ShufflesRecursively(
32832 Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
32833 AllowVar, DAG, Subtarget))
32834 return Res;
32835 }
32836 }
32838 // Attempt to constant fold all of the constant source ops.
32839 if (SDValue Cst = combineX86ShufflesConstants(
32840 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
32843 // We can only combine unary and binary shuffle mask cases.
32844 if (Ops.size() <= 2) {
32845 // Minor canonicalization of the accumulated shuffle mask to make it easier
32846 // to match below. All this does is detect masks with sequential pairs of
32847 // elements, and shrink them to the half-width mask. It does this in a loop
32848 // so it will reduce the size of the mask to the minimal width mask which
32849 // performs an equivalent shuffle.
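// e.g. a v4i32-style mask <2,3,0,1> widens to the equivalent v2i64 mask <1,0>.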
32850 SmallVector<int, 64> WidenedMask;
32851 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
32852 Mask = std::move(WidenedMask);
32855 // Canonicalization of binary shuffle masks to improve pattern matching by
32856 // commuting the inputs.
32857 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
32858 ShuffleVectorSDNode::commuteMask(Mask);
32859 std::swap(Ops[0], Ops[1]);
32862 // Finally, try to combine into a single shuffle instruction.
32863 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
32864 AllowVariableMask, DAG, Subtarget);
32867 // If that failed and any input is extracted then try to combine as a
32868 // shuffle with the larger type.
32869 return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
32870 HasVariableMask, AllowVariableMask,
32871 DAG, Subtarget);
32872 }
32874 /// Helper entry wrapper to combineX86ShufflesRecursively.
32875 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
32876 const X86Subtarget &Subtarget) {
32877 return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
32878 /*HasVarMask*/ false,
32879 /*AllowVarMask*/ true, DAG, Subtarget);
32882 /// Get the PSHUF-style mask from PSHUF node.
32884 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
32885 /// PSHUF-style masks that can be reused with such instructions.
32886 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
32887 MVT VT = N.getSimpleValueType();
32888 SmallVector<int, 4> Mask;
32889 SmallVector<SDValue, 2> Ops;
32890 bool IsUnary;
32891 bool HaveMask =
32892 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
32893 (void)HaveMask;
32894 assert(HaveMask);
32896 // If we have more than 128-bits, only the low 128-bits of shuffle mask
32897 // matter. Check that the upper masks are repeats and remove them.
32898 if (VT.getSizeInBits() > 128) {
32899 int LaneElts = 128 / VT.getScalarSizeInBits();
32901 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
32902 for (int j = 0; j < LaneElts; ++j)
32903 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
32904 "Mask doesn't repeat in high 128-bit lanes!");
32906 Mask.resize(LaneElts);
32909 switch (N.getOpcode()) {
32910 case X86ISD::PSHUFD:
32911 return Mask;
32912 case X86ISD::PSHUFLW:
32913 Mask.resize(4);
32914 return Mask;
32915 case X86ISD::PSHUFHW:
32916 Mask.erase(Mask.begin(), Mask.begin() + 4);
32917 for (int &M : Mask)
32918 M -= 4;
32919 return Mask;
32920 default:
32921 llvm_unreachable("No valid shuffle instruction found!");
32925 /// Search for a combinable shuffle across a chain ending in pshufd.
32927 /// We walk up the chain and look for a combinable shuffle, skipping over
32928 /// shuffles that we could hoist this shuffle's transformation past without
32929 /// altering anything.
32931 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
32932 SelectionDAG &DAG) {
32933 assert(N.getOpcode() == X86ISD::PSHUFD &&
32934 "Called with something other than an x86 128-bit half shuffle!");
32937 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
32938 // of the shuffles in the chain so that we can form a fresh chain to replace
32940 SmallVector<SDValue, 8> Chain;
32941 SDValue V = N.getOperand(0);
32942 for (; V.hasOneUse(); V = V.getOperand(0)) {
32943 switch (V.getOpcode()) {
32944 default:
32945 return SDValue(); // Nothing combined!
32947 case ISD::BITCAST:
32948 // Skip bitcasts as we always know the type for the target specific
32949 // shuffles.
32950 continue;
32952 case X86ISD::PSHUFD:
32953 // Found another dword shuffle.
32956 case X86ISD::PSHUFLW:
32957 // Check that the low words (being shuffled) are the identity in the
32958 // dword shuffle, and the high words are self-contained.
32959 if (Mask[0] != 0 || Mask[1] != 1 ||
32960 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
32963 Chain.push_back(V);
32966 case X86ISD::PSHUFHW:
32967 // Check that the high words (being shuffled) are the identity in the
32968 // dword shuffle, and the low words are self-contained.
32969 if (Mask[2] != 2 || Mask[3] != 3 ||
32970 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
32973 Chain.push_back(V);
32976 case X86ISD::UNPCKL:
32977 case X86ISD::UNPCKH:
32978 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
32979 // shuffle into a preceding word shuffle.
32980 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
32981 V.getSimpleValueType().getVectorElementType() != MVT::i16)
32984 // Search for a half-shuffle which we can combine with.
32985 unsigned CombineOp =
32986 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
32987 if (V.getOperand(0) != V.getOperand(1) ||
32988 !V->isOnlyUserOf(V.getOperand(0).getNode()))
32990 Chain.push_back(V);
32991 V = V.getOperand(0);
32993 switch (V.getOpcode()) {
32995 return SDValue(); // Nothing to combine.
32997 case X86ISD::PSHUFLW:
32998 case X86ISD::PSHUFHW:
32999 if (V.getOpcode() == CombineOp)
33002 Chain.push_back(V);
33006 V = V.getOperand(0);
33010 } while (V.hasOneUse());
33013 // Break out of the loop if we break out of the switch.
33017 if (!V.hasOneUse())
33018 // We fell out of the loop without finding a viable combining instruction.
33021 // Merge this node's mask and our incoming mask.
33022 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
33023 for (int &M : Mask)
33024 M = VMask[M];
33025 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
33026 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
33028 // Rebuild the chain around this new shuffle.
33029 while (!Chain.empty()) {
33030 SDValue W = Chain.pop_back_val();
33032 if (V.getValueType() != W.getOperand(0).getValueType())
33033 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
33035 switch (W.getOpcode()) {
33037 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
33039 case X86ISD::UNPCKL:
33040 case X86ISD::UNPCKH:
33041 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
33044 case X86ISD::PSHUFD:
33045 case X86ISD::PSHUFLW:
33046 case X86ISD::PSHUFHW:
33047 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
33051 if (V.getValueType() != N.getValueType())
33052 V = DAG.getBitcast(N.getValueType(), V);
33054 // Return the new chain to replace N.
33055 return V;
33056 }
33058 /// Try to combine x86 target specific shuffles.
33059 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
33060 TargetLowering::DAGCombinerInfo &DCI,
33061 const X86Subtarget &Subtarget) {
33062 SDLoc DL(N);
33063 MVT VT = N.getSimpleValueType();
33064 SmallVector<int, 4> Mask;
33065 unsigned Opcode = N.getOpcode();
33067 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
33068 // single instruction.
33069 if (VT.getScalarSizeInBits() == 64 &&
33070 (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
33071 Opcode == X86ISD::UNPCKL)) {
33072 auto BC0 = peekThroughBitcasts(N.getOperand(0));
33073 auto BC1 = peekThroughBitcasts(N.getOperand(1));
33074 EVT VT0 = BC0.getValueType();
33075 EVT VT1 = BC1.getValueType();
33076 unsigned Opcode0 = BC0.getOpcode();
33077 unsigned Opcode1 = BC1.getOpcode();
33078 if (Opcode0 == Opcode1 && VT0 == VT1 &&
33079 (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
33080 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
33081 Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
33082 SDValue Lo, Hi;
33083 if (Opcode == X86ISD::MOVSD) {
33084 Lo = BC1.getOperand(0);
33085 Hi = BC0.getOperand(1);
33086 } else {
33087 Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
33088 Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
33089 }
33090 SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
33091 return DAG.getBitcast(VT, Horiz);
33092 }
33093 }
33095 switch (Opcode) {
33096 case X86ISD::VBROADCAST: {
33097 SDValue Src = N.getOperand(0);
33098 SDValue BC = peekThroughBitcasts(Src);
33099 EVT SrcVT = Src.getValueType();
33100 EVT BCVT = BC.getValueType();
33102 // If broadcasting from another shuffle, attempt to simplify it.
33103 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
33104 if (isTargetShuffle(BC.getOpcode()) &&
33105 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
33106 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
33107 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
33108 SM_SentinelUndef);
33109 for (unsigned i = 0; i != Scale; ++i)
33110 DemandedMask[i] = i;
33111 if (SDValue Res = combineX86ShufflesRecursively(
33112 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 1,
33113 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
33114 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33115 DAG.getBitcast(SrcVT, Res));
33118 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
33119 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
33120 if (Src.getOpcode() == ISD::BITCAST &&
33121 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
33122 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
33123 VT.getVectorNumElements());
33124 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
33127 // Reduce broadcast source vector to lowest 128-bits.
33128 if (SrcVT.getSizeInBits() > 128)
33129 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33130 extract128BitVector(Src, 0, DAG, DL));
33132 // broadcast(scalar_to_vector(x)) -> broadcast(x).
33133 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
33134 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
33136 // Share broadcast with the longest vector and extract low subvector (free).
33137 for (SDNode *User : Src->uses())
33138 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
33139 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
33140 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
33141 VT.getSizeInBits());
33146 case X86ISD::BLENDI: {
33147 SDValue N0 = N.getOperand(0);
33148 SDValue N1 = N.getOperand(1);
33150 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
33151 // TODO: Handle MVT::v16i16 repeated blend mask.
33152 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
33153 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
33154 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
33155 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
33156 SrcVT.getScalarSizeInBits() >= 32) {
33157 unsigned Mask = N.getConstantOperandVal(2);
33158 unsigned Size = VT.getVectorNumElements();
33159 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
33160 unsigned ScaleMask = scaleVectorShuffleBlendMask(Mask, Size, Scale);
33161 return DAG.getBitcast(
33162 VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
33163 N1.getOperand(0),
33164 DAG.getConstant(ScaleMask, DL, MVT::i8)));
33165 }
33166 }
33167 return SDValue();
33168 }
33169 case X86ISD::VPERMI: {
33170 // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
33171 // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
33172 SDValue N0 = N.getOperand(0);
33173 SDValue N1 = N.getOperand(1);
33174 unsigned EltSizeInBits = VT.getScalarSizeInBits();
33175 if (N0.getOpcode() == ISD::BITCAST &&
33176 N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
33177 SDValue Src = N0.getOperand(0);
33178 EVT SrcVT = Src.getValueType();
33179 SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
33180 return DAG.getBitcast(VT, Res);
33184 case X86ISD::PSHUFD:
33185 case X86ISD::PSHUFLW:
33186 case X86ISD::PSHUFHW:
33187 Mask = getPSHUFShuffleMask(N);
33188 assert(Mask.size() == 4);
33189 break;
33190 case X86ISD::MOVSD:
33191 case X86ISD::MOVSS: {
33192 SDValue N0 = N.getOperand(0);
33193 SDValue N1 = N.getOperand(1);
33195 // Canonicalize scalar FPOps:
33196 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
33197 // If commutable, allow OP(N1[0], N0[0]).
33198 unsigned Opcode1 = N1.getOpcode();
33199 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
33200 Opcode1 == ISD::FDIV) {
33201 SDValue N10 = N1.getOperand(0);
33202 SDValue N11 = N1.getOperand(1);
33203 if (N10 == N0 ||
33204 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
33205 if (N10 != N0)
33206 std::swap(N10, N11);
33207 MVT SVT = VT.getVectorElementType();
33208 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
33209 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
33210 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
33211 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
33212 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
33213 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
33219 case X86ISD::INSERTPS: {
33220 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
33221 SDValue Op0 = N.getOperand(0);
33222 SDValue Op1 = N.getOperand(1);
33223 SDValue Op2 = N.getOperand(2);
33224 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
33225 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
33226 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
33227 unsigned ZeroMask = InsertPSMask & 0xF;
33229 // If we zero out all elements from Op0 then we don't need to reference it.
33230 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
33231 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
33232 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33234 // If we zero out the element from Op1 then we don't need to reference it.
33235 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
33236 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
33237 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33239 // Attempt to merge insertps Op1 with an inner target shuffle node.
33240 SmallVector<int, 8> TargetMask1;
33241 SmallVector<SDValue, 2> Ops1;
33242 if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
33243 int M = TargetMask1[SrcIdx];
33244 if (isUndefOrZero(M)) {
33245 // Zero/UNDEF insertion - zero out element and remove dependency.
33246 InsertPSMask |= (1u << DstIdx);
33247 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
33248 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33250 // Update insertps mask srcidx and reference the source input directly.
33251 assert(0 <= M && M < 8 && "Shuffle index out of range");
33252 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
33253 Op1 = Ops1[M < 4 ? 0 : 1];
33254 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
33255 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33258 // Attempt to merge insertps Op0 with an inner target shuffle node.
33259 SmallVector<int, 8> TargetMask0;
33260 SmallVector<SDValue, 2> Ops0;
33261 if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
33264 bool Updated = false;
33265 bool UseInput00 = false;
33266 bool UseInput01 = false;
33267 for (int i = 0; i != 4; ++i) {
33268 int M = TargetMask0[i];
33269 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
33270 // No change if element is already zero or the inserted element.
33272 } else if (isUndefOrZero(M)) {
33273 // If the target mask is undef/zero then we must zero the element.
33274 InsertPSMask |= (1u << i);
33279 // The input vector element must be inline.
33280 if (M != i && M != (i + 4))
33283 // Determine which inputs of the target shuffle we're using.
33284 UseInput00 |= (0 <= M && M < 4);
33285 UseInput01 |= (4 <= M);
33288 // If we're not using both inputs of the target shuffle then use the
33289 // referenced input directly.
33290 if (UseInput00 && !UseInput01) {
33291 Updated = true;
33292 Op0 = Ops0[0];
33293 } else if (!UseInput00 && UseInput01) {
33294 Updated = true;
33295 Op0 = Ops0[1];
33296 }
33298 if (Updated)
33299 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
33300 DAG.getConstant(InsertPSMask, DL, MVT::i8));
33308 // Nuke no-op shuffles that show up after combining.
33309 if (isNoopShuffleMask(Mask))
33310 return N.getOperand(0);
33312 // Look for simplifications involving one or two shuffle instructions.
33313 SDValue V = N.getOperand(0);
33314 switch (N.getOpcode()) {
33317 case X86ISD::PSHUFLW:
33318 case X86ISD::PSHUFHW:
33319 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
33321 // See if this reduces to a PSHUFD which is no more expensive and can
33322 // combine with more operations. Note that it has to at least flip the
33323 // dwords as otherwise it would have been removed as a no-op.
33324 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
33325 int DMask[] = {0, 1, 2, 3};
33326 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
33327 DMask[DOffset + 0] = DOffset + 1;
33328 DMask[DOffset + 1] = DOffset + 0;
33329 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
33330 V = DAG.getBitcast(DVT, V);
33331 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
33332 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
33333 return DAG.getBitcast(VT, V);
33336 // Look for shuffle patterns which can be implemented as a single unpack.
33337 // FIXME: This doesn't handle the location of the PSHUFD generically, and
33338 // only works when we have a PSHUFD followed by two half-shuffles.
33339 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
33340 (V.getOpcode() == X86ISD::PSHUFLW ||
33341 V.getOpcode() == X86ISD::PSHUFHW) &&
33342 V.getOpcode() != N.getOpcode() &&
33344 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
33345 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
33346 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
33347 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
33348 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
33349 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
33351 for (int i = 0; i < 4; ++i) {
33352 WordMask[i + NOffset] = Mask[i] + NOffset;
33353 WordMask[i + VOffset] = VMask[i] + VOffset;
33355 // Map the word mask through the DWord mask.
33357 for (int i = 0; i < 8; ++i)
33358 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
33359 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
33360 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
33361 // We can replace all three shuffles with an unpack.
33362 V = DAG.getBitcast(VT, D.getOperand(0));
33363 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
33372 case X86ISD::PSHUFD:
33373 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
33382 /// Checks if the shuffle mask takes subsequent elements
33383 /// alternately from two vectors.
33384 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
33385 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
33387 int ParitySrc[2] = {-1, -1};
33388 unsigned Size = Mask.size();
33389 for (unsigned i = 0; i != Size; ++i) {
33390 int M = Mask[i];
33391 if (M < 0)
33392 continue;
33394 // Make sure we are using the matching element from the input.
33395 if ((M % Size) != i)
33396 return false;
33398 // Make sure we use the same input for all elements of the same parity.
33399 int Src = M / Size;
33400 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
33401 return false;
33402 ParitySrc[i % 2] = Src;
33403 }
33405 // Make sure each input is used.
33406 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
33407 return false;
33409 Op0Even = ParitySrc[0] == 0;
33410 return true;
33411 }
33413 /// Returns true iff the shuffle node \p N can be replaced with an
33414 /// ADDSUB(SUBADD) operation. If true is returned then the operands of the
33415 /// ADDSUB(SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1.
33416 ///
33417 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector shuffle nodes
33418 /// so it is easier to generically match. We also insert dummy vector shuffle
33419 /// nodes for the operands which explicitly discard the lanes which are unused
33420 /// by this operation to try to flow through the rest of the combiner
33421 /// the fact that they're unused.
33422 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
33423 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
33426 EVT VT = N->getValueType(0);
33427 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33428 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
33429 !VT.getSimpleVT().isFloatingPoint())
33432 // We only handle target-independent shuffles.
33433 // FIXME: It would be easy and harmless to use the target shuffle mask
33434 // extraction tool to support more.
33435 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
33438 SDValue V1 = N->getOperand(0);
33439 SDValue V2 = N->getOperand(1);
33441 // Make sure we have an FADD and an FSUB.
33442 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
33443 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
33444 V1.getOpcode() == V2.getOpcode())
33447 // If there are other uses of these operations we can't fold them.
33448 if (!V1->hasOneUse() || !V2->hasOneUse())
33451 // Ensure that both operations have the same operands. Note that we can
33452 // commute the FADD operands.
33453 SDValue LHS, RHS;
33454 if (V1.getOpcode() == ISD::FSUB) {
33455 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
33456 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
33457 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
33458 return false;
33459 } else {
33460 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
33461 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
33462 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
33463 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
33467 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33468 bool Op0Even;
33469 if (!isAddSubOrSubAddMask(Mask, Op0Even))
33470 return false;
33472 // It's a subadd if the vector in the even parity is an FADD.
33473 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
33474 : V2->getOpcode() == ISD::FADD;
33476 Opnd0 = LHS;
33477 Opnd1 = RHS;
33478 return true;
33479 }
33481 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
33482 static SDValue combineShuffleToFMAddSub(SDNode *N,
33483 const X86Subtarget &Subtarget,
33484 SelectionDAG &DAG) {
33485 // We only handle target-independent shuffles.
33486 // FIXME: It would be easy and harmless to use the target shuffle mask
33487 // extraction tool to support more.
33488 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
33491 MVT VT = N->getSimpleValueType(0);
33492 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33493 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
33496 // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
33497 SDValue Op0 = N->getOperand(0);
33498 SDValue Op1 = N->getOperand(1);
33499 SDValue FMAdd = Op0, FMSub = Op1;
33500 if (FMSub.getOpcode() != X86ISD::FMSUB)
33501 std::swap(FMAdd, FMSub);
33503 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
33504 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
33505 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
33506 FMAdd.getOperand(2) != FMSub.getOperand(2))
33509 // Check for correct shuffle mask.
33510 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33512 if (!isAddSubOrSubAddMask(Mask, Op0Even))
33515 // FMAddSub takes zeroth operand from FMSub node.
33517 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
33518 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
33519 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
33520 FMAdd.getOperand(2));
33523 /// Try to combine a shuffle into a target-specific add-sub or
33524 /// mul-add-sub node.
33525 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
33526 const X86Subtarget &Subtarget,
33527 SelectionDAG &DAG) {
33528 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
33531 SDValue Opnd0, Opnd1;
33533 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
33536 MVT VT = N->getSimpleValueType(0);
33539 // Try to generate X86ISD::FMADDSUB node here.
33541 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
33542 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
33543 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
33549 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
33550 // the ADDSUB idiom has been successfully recognized. There are no known
33551 // X86 targets with 512-bit ADDSUB instructions!
33552 if (VT.is512BitVector())
33555 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
33558 // We are looking for a shuffle where both sources are concatenated with undef
33559 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
33560 // if we can express this as a single-source shuffle, that's preferable.
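// Roughly (illustrative):
//   shuffle (concat t1, undef), (concat t2, undef), <Mask>
//   --> shuffle (concat t1, t2), undef, <Mask'>
// where Mask' remaps the second-source indices as described below.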
33561 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
33562 const X86Subtarget &Subtarget) {
33563 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
33566 EVT VT = N->getValueType(0);
33568 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
33569 if (!VT.is128BitVector() && !VT.is256BitVector())
33572 if (VT.getVectorElementType() != MVT::i32 &&
33573 VT.getVectorElementType() != MVT::i64 &&
33574 VT.getVectorElementType() != MVT::f32 &&
33575 VT.getVectorElementType() != MVT::f64)
33578 SDValue N0 = N->getOperand(0);
33579 SDValue N1 = N->getOperand(1);
33581 // Check that both sources are concats with undef.
33582 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
33583 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
33584 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
33585 !N1.getOperand(1).isUndef())
33588 // Construct the new shuffle mask. Elements from the first source retain their
33589 // index, but elements from the second source no longer need to skip an undef.
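// For example (illustrative), with a v8i32 result: old mask element 9 named
// element 1 of t2 (the second operand started at index 8); in the new concat
// (t1, t2) that element sits at index 5, i.e. Elt - NumElts / 2.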
33590 SmallVector<int, 8> Mask;
33591 int NumElts = VT.getVectorNumElements();
33593 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33594 for (int Elt : SVOp->getMask())
33595 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
33598 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
33600 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
33603 /// Eliminate a redundant shuffle of a horizontal math op.
33604 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
33605 unsigned Opcode = N->getOpcode();
33606 if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
33607 if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
33610 // For a broadcast, peek through an extract element of index 0 to find the
33611 // horizontal op: broadcast (ext_vec_elt HOp, 0)
33612 EVT VT = N->getValueType(0);
33613 if (Opcode == X86ISD::VBROADCAST) {
33614 SDValue SrcOp = N->getOperand(0);
33615 if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
33616 SrcOp.getValueType() == MVT::f64 &&
33617 SrcOp.getOperand(0).getValueType() == VT &&
33618 isNullConstant(SrcOp.getOperand(1)))
33619 N = SrcOp.getNode();
33622 SDValue HOp = N->getOperand(0);
33623 if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
33624 HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
33627 // 128-bit horizontal math instructions are defined to operate on adjacent
33628 // lanes of each operand as:
33629 // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
33630 // ...similarly for v2f64 and v8i16.
33631 if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
33632 HOp.getOperand(0) != HOp.getOperand(1))
33635 // The shuffle that we are eliminating may have allowed the horizontal op to
33636 // have an undemanded (undefined) operand. Duplicate the other (defined)
33637 // operand to ensure that the results are defined across all lanes without
// the shuffle.
33639 auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
33641 if (HorizOp.getOperand(0).isUndef()) {
33642 assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
33643 X = HorizOp.getOperand(1);
33644 } else if (HorizOp.getOperand(1).isUndef()) {
33645 assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
33646 X = HorizOp.getOperand(0);
33650 return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
33651 HorizOp.getValueType(), X, X);
33654 // When the operands of a horizontal math op are identical, the low half of
33655 // the result is the same as the high half. If a target shuffle is also
33656 // replicating low and high halves (and without changing the type/length of
33657 // the vector), we don't need the shuffle.
33658 if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
33659 if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
33660 // movddup (hadd X, X) --> hadd X, X
33661 // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
33662 assert((HOp.getValueType() == MVT::v2f64 ||
33663 HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
33664 return updateHOp(HOp, DAG);
33669 // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
33670 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33671 // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
33672 // but this should be tied to whatever horizontal op matching and shuffle
33673 // canonicalization are producing.
33674 if (HOp.getValueSizeInBits() == 128 &&
33675 (isTargetShuffleEquivalent(Mask, {0, 0}) ||
33676 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
33677 isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
33678 return updateHOp(HOp, DAG);
33680 if (HOp.getValueSizeInBits() == 256 &&
33681 (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
33682 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
33683 isTargetShuffleEquivalent(
33684 Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
33685 return updateHOp(HOp, DAG);
33690 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
33691 /// low half of each source vector and does not set any high half elements in
33692 /// the destination vector, narrow the shuffle to half its original size.
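/// For example (illustrative): a v8i32 shuffle whose mask reads only elements
/// 0-3 of each source and leaves the upper four result elements undef can be
/// rewritten as a v4i32 shuffle of the two low halves, reinserted into the
/// wide type via a free subregister insert.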
33693 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
33694 if (!Shuf->getValueType(0).isSimple())
33696 MVT VT = Shuf->getSimpleValueType(0);
33697 if (!VT.is256BitVector() && !VT.is512BitVector())
33700 // See if we can ignore all of the high elements of the shuffle.
33701 ArrayRef<int> Mask = Shuf->getMask();
33702 if (!isUndefUpperHalf(Mask))
33705 // Check if the shuffle mask accesses only the low half of each input vector
33706 // (half-index output is 0 or 2).
33707 int HalfIdx1, HalfIdx2;
33708 SmallVector<int, 8> HalfMask(Mask.size() / 2);
33709 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
33710 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
33713 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
33714 // The trick is knowing that all of the insert/extract are actually free
33715 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
33716 // of narrow inputs into a narrow output, and that is always cheaper than
33717 // the wide shuffle that we started with.
33718 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
33719 Shuf->getOperand(1), HalfMask, HalfIdx1,
33720 HalfIdx2, false, DAG);
33723 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
33724 TargetLowering::DAGCombinerInfo &DCI,
33725 const X86Subtarget &Subtarget) {
33726 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
33727 if (SDValue V = narrowShuffle(Shuf, DAG))
33730 // If we have legalized the vector types, look for blends of FADD and FSUB
33731 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
33733 EVT VT = N->getValueType(0);
33734 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33735 if (TLI.isTypeLegal(VT)) {
33736 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
33739 if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
33743 // During Type Legalization, when promoting illegal vector types,
33744 // the backend might introduce new shuffle dag nodes and bitcasts.
33746 // This code performs the following transformation:
33747 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
33748 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
33750 // We do this only if both the bitcast and the BINOP dag nodes have
33751 // one use. Also, perform this transformation only if the new binary
33752 // operation is legal. This is to avoid introducing dag nodes that
33753 // potentially need to be further expanded (or custom lowered) into a
33754 // less optimal sequence of dag nodes.
33755 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
33756 N->getOpcode() == ISD::VECTOR_SHUFFLE &&
33757 N->getOperand(0).getOpcode() == ISD::BITCAST &&
33758 N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
33759 SDValue N0 = N->getOperand(0);
33760 SDValue N1 = N->getOperand(1);
33762 SDValue BC0 = N0.getOperand(0);
33763 EVT SVT = BC0.getValueType();
33764 unsigned Opcode = BC0.getOpcode();
33765 unsigned NumElts = VT.getVectorNumElements();
33767 if (BC0.hasOneUse() && SVT.isVector() &&
33768 SVT.getVectorNumElements() * 2 == NumElts &&
33769 TLI.isOperationLegal(Opcode, VT)) {
33770 bool CanFold = false;
33776 // isOperationLegal lies for integer ops on floating point types.
33777 CanFold = VT.isInteger();
33782 // isOperationLegal lies for floating point ops on integer types.
33783 CanFold = VT.isFloatingPoint();
33787 unsigned SVTNumElts = SVT.getVectorNumElements();
33788 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33789 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
33790 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
33791 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
33792 CanFold = SVOp->getMaskElt(i) < 0;
33795 SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
33796 SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
33797 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
33798 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
33803 // Attempt to combine into a vector load/broadcast.
33804 if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
33807 // For AVX2, we sometimes want to combine
33808 // (vector_shuffle <mask> (concat_vectors t1, undef)
33809 // (concat_vectors t2, undef))
33811 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
33812 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
33813 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
33816 if (isTargetShuffle(N->getOpcode())) {
33818 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
33821 // Try recursively combining arbitrary sequences of x86 shuffle
33822 // instructions into higher-order shuffles. We do this after combining
33823 // specific PSHUF instruction sequences into their minimal form so that we
33824 // can evaluate how many specialized shuffle instructions are involved in
33825 // a particular chain.
33826 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
33829 // Simplify source operands based on shuffle mask.
33830 // TODO - merge this into combineX86ShufflesRecursively.
33831 APInt KnownUndef, KnownZero;
33832 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
33833 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
33834 return SDValue(N, 0);
33837 // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
33838 // in the upper 64 bits.
33839 // TODO: Can we generalize this using computeKnownBits.
33840 if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
33841 (VT == MVT::v2f64 || VT == MVT::v2i64) &&
33842 N->getOperand(0).getOpcode() == ISD::BITCAST &&
33843 (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
33844 N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
33845 SDValue In = N->getOperand(0).getOperand(0);
33846 switch (In.getOpcode()) {
33849 case X86ISD::CVTP2SI: case X86ISD::CVTP2UI:
33850 case X86ISD::MCVTP2SI: case X86ISD::MCVTP2UI:
33851 case X86ISD::CVTTP2SI: case X86ISD::CVTTP2UI:
33852 case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
33853 case X86ISD::CVTSI2P: case X86ISD::CVTUI2P:
33854 case X86ISD::MCVTSI2P: case X86ISD::MCVTUI2P:
33855 case X86ISD::VFPROUND: case X86ISD::VMFPROUND:
33856 if (In.getOperand(0).getValueType() == MVT::v2f64 ||
33857 In.getOperand(0).getValueType() == MVT::v2i64)
33858 return N->getOperand(0); // return the bitcast
33863 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
33864 // insert into a zero vector. This helps get VZEXT_MOVL closer to
33865 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
33866 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
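// Roughly (illustrative):
//   vzext_movl (insert_subvector undef, X, 0)
//   --> insert_subvector (zero vector), (vzext_movl X), 0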
33867 if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
33868 N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
33869 N->getOperand(0).hasOneUse() &&
33870 N->getOperand(0).getOperand(0).isUndef() &&
33871 isNullConstant(N->getOperand(0).getOperand(2))) {
33872 SDValue In = N->getOperand(0).getOperand(1);
33873 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
33874 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
33875 getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
33876 Movl, N->getOperand(0).getOperand(2));
33879 // If this is a vzmovl of a full vector load, replace it with a vzload, unless
33880 // the load is volatile.
33881 if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
33882 ISD::isNormalLoad(N->getOperand(0).getNode())) {
33883 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
33884 if (!LN->isVolatile()) {
33885 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
33886 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
33888 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33889 VT.getVectorElementType(),
33890 LN->getPointerInfo(),
33891 LN->getAlignment(),
33892 MachineMemOperand::MOLoad);
33893 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
33899 // Look for a truncating shuffle to v2i32 of a PMULUDQ where one of the
33900 // operands is an extend from v2i32 to v2i64. Turn it into a pmulld.
33901 // FIXME: This can probably go away once we default to widening legalization.
33902 if (Subtarget.hasSSE41() && VT == MVT::v4i32 &&
33903 N->getOpcode() == ISD::VECTOR_SHUFFLE &&
33904 N->getOperand(0).getOpcode() == ISD::BITCAST &&
33905 N->getOperand(0).getOperand(0).getOpcode() == X86ISD::PMULUDQ) {
33906 SDValue BC = N->getOperand(0);
33907 SDValue MULUDQ = BC.getOperand(0);
33908 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33909 ArrayRef<int> Mask = SVOp->getMask();
33910 if (BC.hasOneUse() && MULUDQ.hasOneUse() &&
33911 Mask[0] == 0 && Mask[1] == 2 && Mask[2] == -1 && Mask[3] == -1) {
33912 SDValue Op0 = MULUDQ.getOperand(0);
33913 SDValue Op1 = MULUDQ.getOperand(1);
33914 if (Op0.getOpcode() == ISD::BITCAST &&
33915 Op0.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
33916 Op0.getOperand(0).getValueType() == MVT::v4i32) {
33917 ShuffleVectorSDNode *SVOp0 =
33918 cast<ShuffleVectorSDNode>(Op0.getOperand(0));
33919 ArrayRef<int> Mask2 = SVOp0->getMask();
33920 if (Mask2[0] == 0 && Mask2[1] == -1 &&
33921 Mask2[2] == 1 && Mask2[3] == -1) {
33922 Op0 = SVOp0->getOperand(0);
33923 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
33924 Op1 = DAG.getVectorShuffle(MVT::v4i32, dl, Op1, Op1, Mask);
33925 return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
33928 if (Op1.getOpcode() == ISD::BITCAST &&
33929 Op1.getOperand(0).getOpcode() == ISD::VECTOR_SHUFFLE &&
33930 Op1.getOperand(0).getValueType() == MVT::v4i32) {
33931 ShuffleVectorSDNode *SVOp1 =
33932 cast<ShuffleVectorSDNode>(Op1.getOperand(0));
33933 ArrayRef<int> Mask2 = SVOp1->getMask();
33934 if (Mask2[0] == 0 && Mask2[1] == -1 &&
33935 Mask2[2] == 1 && Mask2[3] == -1) {
33936 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
33937 Op0 = DAG.getVectorShuffle(MVT::v4i32, dl, Op0, Op0, Mask);
33938 Op1 = SVOp1->getOperand(0);
33939 return DAG.getNode(ISD::MUL, dl, MVT::v4i32, Op0, Op1);
33948 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
33949 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
33950 TargetLoweringOpt &TLO, unsigned Depth) const {
33951 int NumElts = DemandedElts.getBitWidth();
33952 unsigned Opc = Op.getOpcode();
33953 EVT VT = Op.getValueType();
33955 // Handle special case opcodes.
33957 case X86ISD::PMULDQ:
33958 case X86ISD::PMULUDQ: {
33959 APInt LHSUndef, LHSZero;
33960 APInt RHSUndef, RHSZero;
33961 SDValue LHS = Op.getOperand(0);
33962 SDValue RHS = Op.getOperand(1);
33963 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
33966 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
33969 // Multiply by zero.
33970 KnownZero = LHSZero | RHSZero;
33975 case X86ISD::VSRA: {
33976 // We only need the bottom 64-bits of the (128-bit) shift amount.
33977 SDValue Amt = Op.getOperand(1);
33978 MVT AmtVT = Amt.getSimpleValueType();
33979 assert(AmtVT.is128BitVector() && "Unexpected value type");
33981 // If the shift amount is used only as an SSE shift amount then we know
33982 // that only the bottom 64-bits are ever used.
33983 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
33984 unsigned UseOpc = Use->getOpcode();
33985 return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
33986 UseOpc == X86ISD::VSRA) &&
33987 Use->getOperand(0) != Amt;
33990 APInt AmtUndef, AmtZero;
33991 unsigned NumAmtElts = AmtVT.getVectorNumElements();
33992 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
33993 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
33994 Depth + 1, AssumeSingleUse))
33998 case X86ISD::VSHLI:
33999 case X86ISD::VSRLI:
34000 case X86ISD::VSRAI: {
34001 SDValue Src = Op.getOperand(0);
34003 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
34006 // TODO convert SrcUndef to KnownUndef.
34009 case X86ISD::CVTSI2P:
34010 case X86ISD::CVTUI2P: {
34011 SDValue Src = Op.getOperand(0);
34012 MVT SrcVT = Src.getSimpleValueType();
34013 APInt SrcUndef, SrcZero;
34014 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
34015 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
34020 case X86ISD::PACKSS:
34021 case X86ISD::PACKUS: {
34022 APInt DemandedLHS, DemandedRHS;
34023 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
34025 APInt SrcUndef, SrcZero;
34026 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, SrcUndef,
34027 SrcZero, TLO, Depth + 1))
34029 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, SrcUndef,
34030 SrcZero, TLO, Depth + 1))
34036 case X86ISD::FHADD:
34037 case X86ISD::FHSUB: {
34038 APInt DemandedLHS, DemandedRHS;
34039 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
34041 APInt LHSUndef, LHSZero;
34042 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
34043 LHSZero, TLO, Depth + 1))
34045 APInt RHSUndef, RHSZero;
34046 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
34047 RHSZero, TLO, Depth + 1))
34051 case X86ISD::VTRUNC:
34052 case X86ISD::VTRUNCS:
34053 case X86ISD::VTRUNCUS: {
34054 SDValue Src = Op.getOperand(0);
34055 MVT SrcVT = Src.getSimpleValueType();
34056 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
34057 APInt SrcUndef, SrcZero;
34058 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
34061 KnownZero = SrcZero.zextOrTrunc(NumElts);
34062 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
34065 case X86ISD::BLENDV: {
34066 APInt SelUndef, SelZero;
34067 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
34068 SelZero, TLO, Depth + 1))
34071 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
34072 APInt LHSUndef, LHSZero;
34073 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
34074 LHSZero, TLO, Depth + 1))
34077 APInt RHSUndef, RHSZero;
34078 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
34079 RHSZero, TLO, Depth + 1))
34082 KnownZero = LHSZero & RHSZero;
34083 KnownUndef = LHSUndef & RHSUndef;
34086 case X86ISD::VBROADCAST: {
34087 SDValue Src = Op.getOperand(0);
34088 MVT SrcVT = Src.getSimpleValueType();
34089 if (!SrcVT.isVector())
34091 // Don't bother broadcasting if we just need the 0'th element.
34092 if (DemandedElts == 1) {
34093 if (Src.getValueType() != VT)
34094 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
34096 return TLO.CombineTo(Op, Src);
34098 APInt SrcUndef, SrcZero;
34099 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
34100 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
34105 case X86ISD::VPERMV: {
34106 SDValue Mask = Op.getOperand(0);
34107 APInt MaskUndef, MaskZero;
34108 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
34113 case X86ISD::PSHUFB:
34114 case X86ISD::VPERMV3:
34115 case X86ISD::VPERMILPV: {
34116 SDValue Mask = Op.getOperand(1);
34117 APInt MaskUndef, MaskZero;
34118 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
34123 case X86ISD::VPPERM:
34124 case X86ISD::VPERMIL2: {
34125 SDValue Mask = Op.getOperand(2);
34126 APInt MaskUndef, MaskZero;
34127 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
34134 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
34135 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
34136 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
34137 if ((VT.is256BitVector() || VT.is512BitVector()) &&
34138 DemandedElts.lshr(NumElts / 2) == 0) {
34139 unsigned SizeInBits = VT.getSizeInBits();
34140 unsigned ExtSizeInBits = SizeInBits / 2;
34142 // See if 512-bit ops only use the bottom 128-bits.
34143 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
34144 ExtSizeInBits = SizeInBits / 4;
34147 // Zero upper elements.
34148 case X86ISD::VZEXT_MOVL: {
34151 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34153 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
34154 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34156 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34157 return TLO.CombineTo(Op, Insert);
34159 // Subvector broadcast.
34160 case X86ISD::SUBV_BROADCAST: {
34162 SDValue Src = Op.getOperand(0);
34163 if (Src.getValueSizeInBits() > ExtSizeInBits)
34164 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
34165 else if (Src.getValueSizeInBits() < ExtSizeInBits) {
34166 MVT SrcSVT = Src.getSimpleValueType().getScalarType();
34168 MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
34169 Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
34171 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
34172 TLO.DAG, DL, ExtSizeInBits));
34174 // Byte shifts by immediate.
34175 case X86ISD::VSHLDQ:
34176 case X86ISD::VSRLDQ:
34177 // Shift by uniform.
34181 // Shift by immediate.
34182 case X86ISD::VSHLI:
34183 case X86ISD::VSRLI:
34184 case X86ISD::VSRAI: {
34187 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34189 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
34190 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34192 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34193 return TLO.CombineTo(Op, Insert);
34195 case X86ISD::VPERMI: {
34196 // Simplify PERMPD/PERMQ to extract_subvector.
34197 // TODO: This should be done in shuffle combining.
34198 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
34199 SmallVector<int, 4> Mask;
34200 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
34201 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
34203 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
34204 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34205 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
34206 return TLO.CombineTo(Op, Insert);
34211 // Target Shuffles.
34212 case X86ISD::PSHUFB:
34213 case X86ISD::UNPCKL:
34214 case X86ISD::UNPCKH:
34215 // Saturated Packs.
34216 case X86ISD::PACKSS:
34217 case X86ISD::PACKUS:
34221 case X86ISD::FHADD:
34222 case X86ISD::FHSUB: {
34224 MVT ExtVT = VT.getSimpleVT();
34225 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
34226 ExtSizeInBits / ExtVT.getScalarSizeInBits());
34228 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34230 extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
34231 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
34232 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34234 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34235 return TLO.CombineTo(Op, Insert);
34240 // Simplify target shuffles.
34241 if (!isTargetShuffle(Opc) || !VT.isSimple())
34244 // Get target shuffle mask.
34246 SmallVector<int, 64> OpMask;
34247 SmallVector<SDValue, 2> OpInputs;
34248 if (!getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, OpInputs,
34252 // Shuffle inputs must be the same type as the result.
34253 if (llvm::any_of(OpInputs,
34254 [VT](SDValue V) { return VT != V.getValueType(); }))
34257 // Clear known elts that might have been set above.
34258 KnownZero.clearAllBits();
34259 KnownUndef.clearAllBits();
34261 // Check if shuffle mask can be simplified to undef/zero/identity.
34262 int NumSrcs = OpInputs.size();
34263 for (int i = 0; i != NumElts; ++i) {
34264 int &M = OpMask[i];
34265 if (!DemandedElts[i])
34266 M = SM_SentinelUndef;
34267 else if (0 <= M && OpInputs[M / NumElts].isUndef())
34268 M = SM_SentinelUndef;
34271 if (isUndefInRange(OpMask, 0, NumElts)) {
34272 KnownUndef.setAllBits();
34273 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
34275 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
34276 KnownZero.setAllBits();
34277 return TLO.CombineTo(
34278 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
34280 for (int Src = 0; Src != NumSrcs; ++Src)
34281 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
34282 return TLO.CombineTo(Op, OpInputs[Src]);
34284 // Attempt to simplify inputs.
34285 for (int Src = 0; Src != NumSrcs; ++Src) {
34286 int Lo = Src * NumElts;
34287 APInt SrcElts = APInt::getNullValue(NumElts);
34288 for (int i = 0; i != NumElts; ++i)
34289 if (DemandedElts[i]) {
34290 int M = OpMask[i] - Lo;
34291 if (0 <= M && M < NumElts)
34295 APInt SrcUndef, SrcZero;
34296 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
34301 // Extract known zero/undef elements.
34302 // TODO - Propagate input undef/zero elts.
34303 for (int i = 0; i != NumElts; ++i) {
34304 if (OpMask[i] == SM_SentinelUndef)
34305 KnownUndef.setBit(i);
34306 if (OpMask[i] == SM_SentinelZero)
34307 KnownZero.setBit(i);
34313 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
34314 SDValue Op, const APInt &OriginalDemandedBits,
34315 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
34316 unsigned Depth) const {
34317 EVT VT = Op.getValueType();
34318 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
34319 unsigned Opc = Op.getOpcode();
34321 case X86ISD::PMULDQ:
34322 case X86ISD::PMULUDQ: {
34323 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
34325 SDValue LHS = Op.getOperand(0);
34326 SDValue RHS = Op.getOperand(1);
34327 // FIXME: Can we bound this better?
34328 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
34329 if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
34332 if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
34337 case X86ISD::VSHLI: {
34338 SDValue Op0 = Op.getOperand(0);
34339 SDValue Op1 = Op.getOperand(1);
34341 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
34342 if (ShiftImm->getAPIntValue().uge(BitWidth))
34345 unsigned ShAmt = ShiftImm->getZExtValue();
34346 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
34348 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
34349 // single shift. We can do this if the bottom bits (which are shifted
34350 // out) are never demanded.
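// Worked example (illustrative): ShAmt = 4 and C1 = 2 give Diff = 2, so
// ((X >>u 2) << 4) becomes (X << 2), which is valid as long as the low 4
// bits of the result are not demanded (the original shift pair would have
// cleared them).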
34351 if (Op0.getOpcode() == X86ISD::VSRLI &&
34352 OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
34353 if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
34354 if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
34355 int Diff = ShAmt - Shift2Imm->getZExtValue();
34357 return TLO.CombineTo(Op, Op0.getOperand(0));
34359 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
34360 SDValue NewShift = TLO.DAG.getNode(
34361 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
34362 TLO.DAG.getConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
34363 return TLO.CombineTo(Op, NewShift);
34368 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
34372 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34373 Known.Zero <<= ShAmt;
34374 Known.One <<= ShAmt;
34376 // Low bits known zero.
34377 Known.Zero.setLowBits(ShAmt);
34381 case X86ISD::VSRLI: {
34382 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
34383 if (ShiftImm->getAPIntValue().uge(BitWidth))
34386 unsigned ShAmt = ShiftImm->getZExtValue();
34387 APInt DemandedMask = OriginalDemandedBits << ShAmt;
34389 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
34390 OriginalDemandedElts, Known, TLO, Depth + 1))
34393 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34394 Known.Zero.lshrInPlace(ShAmt);
34395 Known.One.lshrInPlace(ShAmt);
34397 // High bits known zero.
34398 Known.Zero.setHighBits(ShAmt);
34402 case X86ISD::VSRAI: {
34403 SDValue Op0 = Op.getOperand(0);
34404 SDValue Op1 = Op.getOperand(1);
34406 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
34407 if (ShiftImm->getAPIntValue().uge(BitWidth))
34410 unsigned ShAmt = ShiftImm->getZExtValue();
34411 APInt DemandedMask = OriginalDemandedBits << ShAmt;
34413 // If we just want the sign bit then we don't need to shift it.
34414 if (OriginalDemandedBits.isSignMask())
34415 return TLO.CombineTo(Op, Op0);
34417 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
34418 if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
34419 SDValue Op00 = Op0.getOperand(0);
34420 unsigned NumSignBits =
34421 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
34422 if (ShAmt < NumSignBits)
34423 return TLO.CombineTo(Op, Op00);
34426 // If any of the demanded bits are produced by the sign extension, we also
34427 // demand the input sign bit.
34428 if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
34429 DemandedMask.setSignBit();
34431 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
34435 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34436 Known.Zero.lshrInPlace(ShAmt);
34437 Known.One.lshrInPlace(ShAmt);
34439 // If the input sign bit is known to be zero, or if none of the top bits
34440 // are demanded, turn this into an unsigned shift right.
34441 if (Known.Zero[BitWidth - ShAmt - 1] ||
34442 OriginalDemandedBits.countLeadingZeros() >= ShAmt)
34443 return TLO.CombineTo(
34444 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
34446 // High bits are known one.
34447 if (Known.One[BitWidth - ShAmt - 1])
34448 Known.One.setHighBits(ShAmt);
34452 case X86ISD::PEXTRB:
34453 case X86ISD::PEXTRW: {
34454 SDValue Vec = Op.getOperand(0);
34455 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
34456 MVT VecVT = Vec.getSimpleValueType();
34457 unsigned NumVecElts = VecVT.getVectorNumElements();
34459 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
34460 unsigned Idx = CIdx->getZExtValue();
34461 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
34463 // If we demand no bits from the vector then we must have demanded
34464 // bits from the implicit zext - simplify to zero.
34465 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
34466 if (DemandedVecBits == 0)
34467 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
34469 APInt KnownUndef, KnownZero;
34470 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
34471 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
34472 KnownZero, TLO, Depth + 1))
34475 KnownBits KnownVec;
34476 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
34477 KnownVec, TLO, Depth + 1))
34480 Known = KnownVec.zext(BitWidth, true);
34485 case X86ISD::PINSRB:
34486 case X86ISD::PINSRW: {
34487 SDValue Vec = Op.getOperand(0);
34488 SDValue Scl = Op.getOperand(1);
34489 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
34490 MVT VecVT = Vec.getSimpleValueType();
34492 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
34493 unsigned Idx = CIdx->getZExtValue();
34494 if (!OriginalDemandedElts[Idx])
34495 return TLO.CombineTo(Op, Vec);
34497 KnownBits KnownVec;
34498 APInt DemandedVecElts(OriginalDemandedElts);
34499 DemandedVecElts.clearBit(Idx);
34500 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
34501 KnownVec, TLO, Depth + 1))
34504 KnownBits KnownScl;
34505 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
34506 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
34507 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
34510 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
34511 Known.One = KnownVec.One & KnownScl.One;
34512 Known.Zero = KnownVec.Zero & KnownScl.Zero;
34517 case X86ISD::PACKSS:
34518 // PACKSS saturates to MIN/MAX integer values. So if we just want the
34519 // sign bit then we can just ask for the source operands' sign bits.
34520 // TODO - add known bits handling.
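// Rationale: signed saturation preserves the sign, so the sign bit of each
// packed element equals the sign bit of the corresponding wider source
// element.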
34521 if (OriginalDemandedBits.isSignMask()) {
34522 APInt DemandedLHS, DemandedRHS;
34523 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
34525 KnownBits KnownLHS, KnownRHS;
34526 APInt SignMask = APInt::getSignMask(BitWidth * 2);
34527 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
34528 KnownLHS, TLO, Depth + 1))
34530 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
34531 KnownRHS, TLO, Depth + 1))
34534 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
34536 case X86ISD::PCMPGT:
34537 // icmp sgt(0, R) == ashr(R, BitWidth-1).
34538 // If we only need the sign bit then we can use R directly.
34539 if (OriginalDemandedBits.isSignMask() &&
34540 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
34541 return TLO.CombineTo(Op, Op.getOperand(1));
34543 case X86ISD::MOVMSK: {
34544 SDValue Src = Op.getOperand(0);
34545 MVT SrcVT = Src.getSimpleValueType();
34546 unsigned SrcBits = SrcVT.getScalarSizeInBits();
34547 unsigned NumElts = SrcVT.getVectorNumElements();
34549 // If we don't need the sign bits at all just return zero.
34550 if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
34551 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
34553 // Only demand the vector elements of the sign bits we need.
34554 APInt KnownUndef, KnownZero;
34555 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
34556 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
34560 Known.Zero = KnownZero.zextOrSelf(BitWidth);
34561 Known.Zero.setHighBits(BitWidth - NumElts);
34563 // MOVMSK only uses the MSB from each vector element.
34564 KnownBits KnownSrc;
34565 if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
34566 KnownSrc, TLO, Depth + 1))
34569 if (KnownSrc.One[SrcBits - 1])
34570 Known.One.setLowBits(NumElts);
34571 else if (KnownSrc.Zero[SrcBits - 1])
34572 Known.Zero.setLowBits(NumElts);
34577 return TargetLowering::SimplifyDemandedBitsForTargetNode(
34578 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
34581 /// Check if a vector extract from a target-specific shuffle of a load can be
34582 /// folded into a single element load.
34583 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
34584 /// shuffles have been custom lowered so we need to handle those here.
34585 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
34586 TargetLowering::DAGCombinerInfo &DCI) {
34587 if (DCI.isBeforeLegalizeOps())
34590 SDValue InVec = N->getOperand(0);
34591 SDValue EltNo = N->getOperand(1);
34592 EVT EltVT = N->getValueType(0);
34594 if (!isa<ConstantSDNode>(EltNo))
34597 EVT OriginalVT = InVec.getValueType();
34599 // Peek through bitcasts, don't duplicate a load with other uses.
34600 InVec = peekThroughOneUseBitcasts(InVec);
34602 EVT CurrentVT = InVec.getValueType();
34603 if (!CurrentVT.isVector() ||
34604 CurrentVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
34607 if (!isTargetShuffle(InVec.getOpcode()))
34610 // Don't duplicate a load with other uses.
34611 if (!InVec.hasOneUse())
34614 SmallVector<int, 16> ShuffleMask;
34615 SmallVector<SDValue, 2> ShuffleOps;
34617 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
34618 ShuffleOps, ShuffleMask, UnaryShuffle))
34621 // Select the input vector, guarding against an out-of-range extract index.
34622 unsigned NumElems = CurrentVT.getVectorNumElements();
34623 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
34624 int Idx = (Elt > (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt];
34626 if (Idx == SM_SentinelZero)
34627 return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
34628 : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
34629 if (Idx == SM_SentinelUndef)
34630 return DAG.getUNDEF(EltVT);
34632 // Bail if any mask element is SM_SentinelZero - getVectorShuffle below
34633 // won't handle it.
34634 if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
34637 assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
34638 SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0] : ShuffleOps[1];
34640 // If inputs to shuffle are the same for both ops, then allow 2 uses
34641 unsigned AllowedUses =
34642 (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
34644 if (LdNode.getOpcode() == ISD::BITCAST) {
34645 // Don't duplicate a load with other uses.
34646 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
34649 AllowedUses = 1; // only allow 1 load use if we have a bitcast
34650 LdNode = LdNode.getOperand(0);
34653 if (!ISD::isNormalLoad(LdNode.getNode()))
34656 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
34658 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
34661 // If there's a bitcast before the shuffle, check if the load type and
34662 // alignment are valid.
34663 unsigned Align = LN0->getAlignment();
34664 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34665 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
34666 EltVT.getTypeForEVT(*DAG.getContext()));
34668 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
34671 // All checks match so transform back to vector_shuffle so that the DAG
34672 // combiner can finish the job.
34675 // Create shuffle node taking into account the case that it's a unary shuffle.
34676 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
34677 Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
34679 Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
34680 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
34684 // Helper to peek through bitops/setcc to determine size of source vector.
34685 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
34686 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
34687 switch (Src.getOpcode()) {
34689 return Src.getOperand(0).getValueSizeInBits() == Size;
34693 return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
34694 checkBitcastSrcVectorSize(Src.getOperand(1), Size);
34699 // Try to match patterns such as
34700 // (i16 bitcast (v16i1 x))
// ->
34702 // (i16 movmsk (v16i8 sext (v16i1 x)))
34703 // before the illegal vector is scalarized on subtargets that don't have
// legal vXi1 types.
34705 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
34707 const X86Subtarget &Subtarget) {
34708 EVT SrcVT = Src.getValueType();
34709 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
34712 // If the input is a truncate from v16i8 or v32i8 go ahead and use a
34713 // movmskb even with avx512. This will be better than truncating to vXi1 and
34714 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
34715 // vpcmpeqb/vpcmpgtb.
34716 bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
34717 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
34718 Src.getOperand(0).getValueType() == MVT::v32i8 ||
34719 Src.getOperand(0).getValueType() == MVT::v64i8);
34721 // With AVX512 vxi1 types are legal and we prefer using k-regs.
34722 // MOVMSK is supported in SSE2 or later.
34723 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
34726 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
34727 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
34728 // v8i16 and v16i16.
34729 // For these two cases, we can shuffle the upper element bytes to a
34730 // consecutive sequence at the start of the vector and treat the results as
34731 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
34732 // for v16i16 this is not the case, because the shuffle is expensive, so we
34733 // avoid sign-extending to this type entirely.
34734 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
34735 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
34737 switch (SrcVT.getSimpleVT().SimpleTy) {
34741 SExtVT = MVT::v2i64;
34744 SExtVT = MVT::v4i32;
34745 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
34746 // sign-extend to a 256-bit operation to avoid truncation.
34747 if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256))
34748 SExtVT = MVT::v4i64;
34751 SExtVT = MVT::v8i16;
34752 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
34753 // sign-extend to a 256-bit operation to match the compare.
34754 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
34755 // 256-bit because the shuffle is cheaper than sign extending the result of
// the compare.
34757 // TODO: use checkBitcastSrcVectorSize.
34758 if (Src.getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
34759 (Src.getOperand(0).getValueType().is256BitVector() ||
34760 Src.getOperand(0).getValueType().is512BitVector())) {
34761 SExtVT = MVT::v8i32;
34765 SExtVT = MVT::v16i8;
34766 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
34767 // it is not profitable to sign-extend to 256-bit because this will
34768 // require an extra cross-lane shuffle which is more expensive than
34769 // truncating the result of the compare to 128-bits.
34772 SExtVT = MVT::v32i8;
34775 // If we have AVX512F but not AVX512BW, and the input was truncated from
34776 // v64i8 (checked earlier), then split the input and make two pmovmskbs.
34777 if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
34778 SExtVT = MVT::v64i8;
34784 SDValue V = DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
34786 if (SExtVT == MVT::v64i8) {
34788 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
34789 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
34790 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
34791 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
34792 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
34793 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
34794 DAG.getConstant(32, DL, MVT::i8));
34795 V = DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
34796 } else if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
34797 V = getPMOVMSKB(DL, V, DAG, Subtarget);
34799 if (SExtVT == MVT::v8i16)
34800 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
34801 DAG.getUNDEF(MVT::v8i16));
34802 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
34806 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
34807 V = DAG.getZExtOrTrunc(V, DL, IntVT);
34808 return DAG.getBitcast(VT, V);
34811 // Convert a vXi1 constant build vector to the same width scalar integer.
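// For example (illustrative): the v4i1 constant <1, 0, 1, 1> becomes the i4
// value 0b1101, with element 0 mapping to bit 0.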
34812 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
34813 EVT SrcVT = Op.getValueType();
34814 assert(SrcVT.getVectorElementType() == MVT::i1 &&
34815 "Expected a vXi1 vector");
34816 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
34817 "Expected a constant build vector");
34819 APInt Imm(SrcVT.getVectorNumElements(), 0);
34820 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
34821 SDValue In = Op.getOperand(Idx);
34822 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
34825 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
34826 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
34829 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
34830 TargetLowering::DAGCombinerInfo &DCI,
34831 const X86Subtarget &Subtarget) {
34832 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
34834 if (!DCI.isBeforeLegalizeOps())
34837 // Only do this if we have k-registers.
34838 if (!Subtarget.hasAVX512())
34841 EVT DstVT = N->getValueType(0);
34842 SDValue Op = N->getOperand(0);
34843 EVT SrcVT = Op.getValueType();
34845 if (!Op.hasOneUse())
34848 // Look for logic ops.
34849 if (Op.getOpcode() != ISD::AND &&
34850 Op.getOpcode() != ISD::OR &&
34851 Op.getOpcode() != ISD::XOR)
34854 // Make sure we have a bitcast between mask registers and a scalar type.
34855 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
34856 DstVT.isScalarInteger()) &&
34857 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
34858 SrcVT.isScalarInteger()))
34861 SDValue LHS = Op.getOperand(0);
34862 SDValue RHS = Op.getOperand(1);
34864 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
34865 LHS.getOperand(0).getValueType() == DstVT)
34866 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
34867 DAG.getBitcast(DstVT, RHS));
34869 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
34870 RHS.getOperand(0).getValueType() == DstVT)
34871 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
34872 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
34874 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
34875 // Most of these have to move a constant from the scalar domain anyway.
34876 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
34877 RHS = combinevXi1ConstantToInteger(RHS, DAG);
34878 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
34879 DAG.getBitcast(DstVT, LHS), RHS);
34885 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
34886 const X86Subtarget &Subtarget) {
34888 unsigned NumElts = BV->getNumOperands();
34889 SDValue Splat = BV->getSplatValue();
34891 // Build MMX element from integer GPR or SSE float values.
34892 auto CreateMMXElement = [&](SDValue V) {
34894 return DAG.getUNDEF(MVT::x86mmx);
34895 if (V.getValueType().isFloatingPoint()) {
34896 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
34897 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
34898 V = DAG.getBitcast(MVT::v2i64, V);
34899 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
34901 V = DAG.getBitcast(MVT::i32, V);
34903 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
34905 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
34908 // Convert build vector ops to MMX data in the bottom elements.
34909 SmallVector<SDValue, 8> Ops;
34911 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
34913 if (Splat.isUndef())
34914 return DAG.getUNDEF(MVT::x86mmx);
34916 Splat = CreateMMXElement(Splat);
34918 if (Subtarget.hasSSE1()) {
34919 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
34921 Splat = DAG.getNode(
34922 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
34923 DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
34926 // Use PSHUFW to repeat 16-bit elements.
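// An immediate of 0 repeats word 0 in all four result words (splatting the
// 8/16-bit element); 0x44 selects words {0, 1, 0, 1}, repeating the 32-bit
// element (illustrative reading of the immediates chosen below).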
34927 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
34928 return DAG.getNode(
34929 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
34930 DAG.getConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32), Splat,
34931 DAG.getConstant(ShufMask, DL, MVT::i8));
34933 Ops.append(NumElts, Splat);
34935 for (unsigned i = 0; i != NumElts; ++i)
34936 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
34939 // Use tree of PUNPCKLs to build up general MMX vector.
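// Illustrative shape of the tree for 8 byte elements: punpcklbw combines the
// 8 elements pairwise into 4, punpcklwd reduces 4 to 2, and punpckldq merges
// the final pair into the full 64-bit MMX value.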
34940 while (Ops.size() > 1) {
34941 unsigned NumOps = Ops.size();
34942 unsigned IntrinOp =
34943 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
34944 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
34945 : Intrinsic::x86_mmx_punpcklbw));
34946 SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
34947 for (unsigned i = 0; i != NumOps; i += 2)
34948 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
34949 Ops[i], Ops[i + 1]);
34950 Ops.resize(NumOps / 2);
34956 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
34957 TargetLowering::DAGCombinerInfo &DCI,
34958 const X86Subtarget &Subtarget) {
34959 SDValue N0 = N->getOperand(0);
34960 EVT VT = N->getValueType(0);
34961 EVT SrcVT = N0.getValueType();
34963 // Try to match patterns such as
34964 // (i16 bitcast (v16i1 x))
// ->
34966 // (i16 movmsk (v16i8 sext (v16i1 x)))
34967 // before the setcc result is scalarized on subtargets that don't have
// legal vXi1 types.
34969 if (DCI.isBeforeLegalize()) {
34971 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
34974 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
34975 // type, widen both sides to avoid a trip through memory.
34976 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
34977 Subtarget.hasAVX512()) {
34978 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
34979 N0 = DAG.getBitcast(MVT::v8i1, N0);
34980 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
34981 DAG.getIntPtrConstant(0, dl));
34984 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
34985 // type, widen both sides to avoid a trip through memory.
34986 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
34987 Subtarget.hasAVX512()) {
34988 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
34989 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
34991 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
34992 N0 = DAG.getBitcast(MVT::i8, N0);
34993 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
34997 // Since MMX types are special and don't usually play with other vector types,
34998 // it's better to handle them early to be sure we emit efficient code by
34999 // avoiding store-load conversions.
35000 if (VT == MVT::x86mmx) {
35001 // Detect MMX constant vectors.
35003 SmallVector<APInt, 1> EltBits;
35004 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
35006 // Handle zero-extension of i32 with MOVD.
35007 if (EltBits[0].countLeadingZeros() >= 32)
35008 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
35009 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
35010 // Else, bitcast to a double.
35011 // TODO - investigate supporting sext 32-bit immediates on x86_64.
35012 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
35013 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
35016 // Detect bitcasts to x86mmx low word.
35017 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
35018 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
35019 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
35020 bool LowUndef = true, AllUndefOrZero = true;
35021 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
35022 SDValue Op = N0.getOperand(i);
35023 LowUndef &= Op.isUndef() || (i >= e/2);
35024 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
35026 if (AllUndefOrZero) {
35027 SDValue N00 = N0.getOperand(0);
35029 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
35030 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
35031 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
35035 // Detect bitcasts of 64-bit build vectors and convert to a
35036 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
// bottom element.
35038 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
35039 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
35040 SrcVT == MVT::v8i8))
35041 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
35043 // Detect bitcasts between element or subvector extraction to x86mmx.
35044 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
35045 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
35046 isNullConstant(N0.getOperand(1))) {
35047 SDValue N00 = N0.getOperand(0);
35048 if (N00.getValueType().is128BitVector())
35049 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
35050 DAG.getBitcast(MVT::v2i64, N00));
35053 // Detect bitcasts from FP_TO_SINT to x86mmx.
35054 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
35056 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
35057 DAG.getUNDEF(MVT::v2i32));
35058 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
35059 DAG.getBitcast(MVT::v2i64, Res));
35063 // Try to remove a bitcast of a constant vXi1 vector. We have to legalize
35064 // most of these to scalar anyway.
35065 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
35066 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
35067 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
35068 return combinevXi1ConstantToInteger(N0, DAG);
35071 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
35072 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
35073 isa<ConstantSDNode>(N0)) {
35074 auto *C = cast<ConstantSDNode>(N0);
35075 if (C->isAllOnesValue())
35076 return DAG.getConstant(1, SDLoc(N0), VT);
35077 if (C->isNullValue())
35078 return DAG.getConstant(0, SDLoc(N0), VT);
35081 // Try to remove bitcasts from input and output of mask arithmetic to
35082 // remove GPR<->K-register crossings.
35083 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
35086 // Convert a bitcasted integer logic operation that has one bitcasted
35087 // floating-point operand into a floating-point logic operation. This may
35088 // create a load of a constant, but that is cheaper than materializing the
35089 // constant in an integer register and transferring it to an SSE register or
35090 // transferring the SSE operand to an integer register and back.
35092 switch (N0.getOpcode()) {
35093 case ISD::AND: FPOpcode = X86ISD::FAND; break;
35094 case ISD::OR: FPOpcode = X86ISD::FOR; break;
35095 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
35096 default: return SDValue();
  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
        (Subtarget.hasSSE2() && VT == MVT::f64)))
    return SDValue();
35103 SDValue LogicOp0 = N0.getOperand(0);
35104 SDValue LogicOp1 = N0.getOperand(1);
35107 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
35108 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
35109 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
35110 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
35111 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
35112 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
35114 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
35115 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
35116 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
35117 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
35118 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
35119 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
// Given an ABS node, detect the following pattern:
35126 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
35127 // This is useful as it is the input into a SAD pattern.
static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
  SDValue AbsOp1 = Abs->getOperand(0);
  if (AbsOp1.getOpcode() != ISD::SUB)
    return false;

  Op0 = AbsOp1.getOperand(0);
  Op1 = AbsOp1.getOperand(1);

  // Check if the operands of the sub are zero-extended from vectors of i8.
  if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
      Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
      Op1.getOpcode() != ISD::ZERO_EXTEND ||
      Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
    return false;

  return true;
}
// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
35148 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
35149 const SDValue &Zext1, const SDLoc &DL,
35150 const X86Subtarget &Subtarget) {
35151 // Find the appropriate width for the PSADBW.
35152 EVT InVT = Zext0.getOperand(0).getValueType();
35153 unsigned RegSize = std::max(128u, InVT.getSizeInBits());
35155 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
35156 // fill in the missing vector elements with 0.
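  // For example, a v4i8 input becomes the low 4 bytes of a zero v16i8 vector,
  // so the extra SAD lanes only ever accumulate zeros.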
35157 unsigned NumConcat = RegSize / InVT.getSizeInBits();
35158 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
35159 Ops[0] = Zext0.getOperand(0);
35160 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
35161 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35162 Ops[0] = Zext1.getOperand(0);
35163 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35165 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
  // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
  auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
    MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
    return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
  };
  MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
  return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
                          PSADBWBuilder);
}
// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
35178 static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
35179 const X86Subtarget &Subtarget) {
35180 // Bail without SSE41.
35181 if (!Subtarget.hasSSE41())
35184 EVT ExtractVT = Extract->getValueType(0);
35185 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
35188 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
35189 ISD::NodeType BinOp;
35190 SDValue Src = DAG.matchBinOpReduction(
35191 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN});
35195 EVT SrcVT = Src.getValueType();
35196 EVT SrcSVT = SrcVT.getScalarType();
35197 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
35201 SDValue MinPos = Src;
35203 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
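  // For example, a v32i8 source is split into two v16i8 halves that are
  // combined with BinOp, halving the width until only 128 bits remain.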
35204 while (SrcVT.getSizeInBits() > 128) {
35205 unsigned NumElts = SrcVT.getVectorNumElements();
35206 unsigned NumSubElts = NumElts / 2;
35207 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
35208 unsigned SubSizeInBits = SrcVT.getSizeInBits();
35209 SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
35210 SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
35211 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
35213 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
35214 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
35215 "Unexpected value type");
35217 // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
35218 // to flip the value accordingly.
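  // For example, XOR'ing with the signed-max mask maps an SMAX ranking onto a
  // UMIN ranking; XOR'ing the PHMINPOSUW result with the same mask afterwards
  // recovers the original value.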
  SDValue Mask;
  unsigned MaskEltsBits = ExtractVT.getSizeInBits();
35221 if (BinOp == ISD::SMAX)
35222 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
35223 else if (BinOp == ISD::SMIN)
35224 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
35225 else if (BinOp == ISD::UMAX)
35226 Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
  if (Mask)
    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
  // For v16i8 cases we need to perform UMIN on pairs of byte elements,
  // shuffling each upper element down and inserting zeros. This means that the
  // v16i8 UMIN will leave the upper element as zero, performing zero-extension
  // ready for the PHMINPOS.
35235 if (ExtractVT == MVT::i8) {
35236 SDValue Upper = DAG.getVectorShuffle(
35237 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
35238 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
    MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
  }

  // Perform the PHMINPOS on a v8i16 vector.
35243 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
35244 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
35245 MinPos = DAG.getBitcast(SrcVT, MinPos);
  if (Mask)
    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35250 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
35251 DAG.getIntPtrConstant(0, DL));
35254 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
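// For example, an all_of reduction of v4i32 sign-bit masks typically becomes a
// MOVMSKPS followed by a compare of the mask against 0xF.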
static SDValue combineHorizontalPredicateResult(SDNode *Extract,
                                                SelectionDAG &DAG,
                                                const X86Subtarget &Subtarget) {
35258 // Bail without SSE2.
35259 if (!Subtarget.hasSSE2())
35262 EVT ExtractVT = Extract->getValueType(0);
35263 unsigned BitWidth = ExtractVT.getSizeInBits();
35264 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
35265 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
35268 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
35269 ISD::NodeType BinOp;
  SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
  if (!Match && ExtractVT == MVT::i1)
    Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
  if (!Match)
    return SDValue();
35276 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
35277 // which we can't support here for now.
35278 if (Match.getScalarValueSizeInBits() != BitWidth)
35283 EVT MatchVT = Match.getValueType();
35284 unsigned NumElts = MatchVT.getVectorNumElements();
35286 if (ExtractVT == MVT::i1) {
35287 // Special case for (pre-legalization) vXi1 reductions.
35290 if (DAG.getTargetLoweringInfo().isTypeLegal(MatchVT)) {
35291 // If this is a legal AVX512 predicate type then we can just bitcast.
35292 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35293 Movmsk = DAG.getBitcast(MovmskVT, Match);
35295 // Use combineBitcastvxi1 to create the MOVMSK.
35296 if (NumElts == 32 && !Subtarget.hasInt256()) {
35298 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35299 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35302 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35303 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
35307 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, MVT::i32);
35309 // Bail with AVX512VL (which uses predicate registers).
35310 if (Subtarget.hasVLX())
35313 unsigned MatchSizeInBits = Match.getValueSizeInBits();
35314 if (!(MatchSizeInBits == 128 ||
35315 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
  // Make sure this isn't a vector of 1 element. The perf win from using
  // MOVMSK diminishes with fewer elements in the reduction, but it is
  // generally better to get the comparison over to the GPRs as soon as
  // possible to reduce the number of vector ops.
  if (Match.getValueType().getVectorNumElements() < 2)
    return SDValue();
35325 // Check that we are extracting a reduction of all sign bits.
35326 if (DAG.ComputeNumSignBits(Match) != BitWidth)
35329 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
35331 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35332 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35333 MatchSizeInBits = Match.getValueSizeInBits();
  // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
  MVT MaskSrcVT;
  if (64 == BitWidth || 32 == BitWidth)
35339 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
35340 MatchSizeInBits / BitWidth);
35342 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
35344 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
35345 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
35346 NumElts = MaskSrcVT.getVectorNumElements();
35348 assert(NumElts <= 32 && "Not expecting more than 32 elements");
35350 if (BinOp == ISD::XOR) {
35351 // parity -> (AND (CTPOP(MOVMSK X)), 1)
35352 SDValue Mask = DAG.getConstant(1, DL, MVT::i32);
35353 SDValue Result = DAG.getNode(ISD::CTPOP, DL, MVT::i32, Movmsk);
35354 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, Mask);
    return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
  }
  SDValue CmpC;
  ISD::CondCode CondCode;
35360 if (BinOp == ISD::OR) {
35361 // any_of -> MOVMSK != 0
35362 CmpC = DAG.getConstant(0, DL, MVT::i32);
35363 CondCode = ISD::CondCode::SETNE;
35365 // all_of -> MOVMSK == ((1 << NumElts) - 1)
35366 CmpC = DAG.getConstant((1ULL << NumElts) - 1, DL, MVT::i32);
35367 CondCode = ISD::CondCode::SETEQ;
35370 // The setcc produces an i8 of 0/1, so extend that to the result width and
35371 // negate to get the final 0/-1 mask value.
35372 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetccVT =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
35375 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
35376 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
35377 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
35378 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
35381 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
35382 const X86Subtarget &Subtarget) {
35383 // PSADBW is only supported on SSE2 and up.
35384 if (!Subtarget.hasSSE2())
  // Verify the vector we're extracting from has integer elements wider than
  // i16.
  EVT VT = Extract->getOperand(0).getValueType();
  if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
    return SDValue();
  unsigned RegSize = 128;
  if (Subtarget.useBWIRegs())
    RegSize = 512;
  else if (Subtarget.hasAVX())
    RegSize = 256;
  // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
35399 // TODO: We should be able to handle larger vectors by splitting them before
35400 // feeding them into several SADs, and then reducing over those.
35401 if (RegSize / VT.getVectorNumElements() < 8)
35404 // Match shuffle + add pyramid.
35405 ISD::NodeType BinOp;
35406 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
35408 // The operand is expected to be zero extended from i8
35409 // (verified in detectZextAbsDiff).
35410 // In order to convert to i64 and above, additional any/zero/sign
35411 // extend is expected.
35412 // The zero extend from 32 bit has no mathematical effect on the result.
35413 // Also the sign extend is basically zero extend
35414 // (extends the sign bit which is zero).
35415 // So it is correct to skip the sign/zero extend instruction.
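  // For example, (i64 (zext (i32 add-reduction of zero-extended i8 values)))
  // yields the same result whether or not the outer extend is folded away,
  // since the 32-bit partial sums cannot overflow for this pattern.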
35416 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
35417 Root.getOpcode() == ISD::ZERO_EXTEND ||
35418 Root.getOpcode() == ISD::ANY_EXTEND))
35419 Root = Root.getOperand(0);
  // If there was a match, we want Root to be an ABS node that is the root of
  // an abs-diff pattern.
  if (!Root || Root.getOpcode() != ISD::ABS)
    return SDValue();

  // Check whether we have an abs-diff pattern feeding into the ABS.
  SDValue Zext0, Zext1;
  if (!detectZextAbsDiff(Root, Zext0, Zext1))
    return SDValue();
35431 // Create the SAD instruction.
35433 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
35435 // If the original vector was wider than 8 elements, sum over the results
35436 // in the SAD vector.
35437 unsigned Stages = Log2_32(VT.getVectorNumElements());
35438 MVT SadVT = SAD.getSimpleValueType();
35440 unsigned SadElems = SadVT.getVectorNumElements();
35442 for(unsigned i = Stages - 3; i > 0; --i) {
    SmallVector<int, 16> Mask(SadElems, -1);
    for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
      Mask[j] = MaskEnd + j;

    SDValue Shuffle =
        DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
    SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
  }
35453 MVT Type = Extract->getSimpleValueType(0);
35454 unsigned TypeSizeInBits = Type.getSizeInBits();
35455 // Return the lowest TypeSizeInBits bits.
35456 MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
35457 SAD = DAG.getBitcast(ResVT, SAD);
35458 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
35459 Extract->getOperand(1));
35462 // Attempt to peek through a target shuffle and extract the scalar from the
35464 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
35465 TargetLowering::DAGCombinerInfo &DCI,
35466 const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
35470 SDValue Src = N->getOperand(0);
35471 SDValue Idx = N->getOperand(1);
35473 EVT VT = N->getValueType(0);
35474 EVT SrcVT = Src.getValueType();
35475 EVT SrcSVT = SrcVT.getVectorElementType();
35476 unsigned NumSrcElts = SrcVT.getVectorNumElements();
35478 // Don't attempt this for boolean mask vectors or unknown extraction indices.
35479 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
35482 SDValue SrcBC = peekThroughBitcasts(Src);
35484 // Handle extract(broadcast(scalar_value)), it doesn't matter what index is.
35485 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
35486 SDValue SrcOp = SrcBC.getOperand(0);
35487 if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
35488 return DAG.getBitcast(VT, SrcOp);
35491 // Resolve the target shuffle inputs and mask.
35492 SmallVector<int, 16> Mask;
35493 SmallVector<SDValue, 2> Ops;
35494 if (!resolveTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
35497 // Attempt to narrow/widen the shuffle mask to the correct size.
35498 if (Mask.size() != NumSrcElts) {
35499 if ((NumSrcElts % Mask.size()) == 0) {
35500 SmallVector<int, 16> ScaledMask;
35501 int Scale = NumSrcElts / Mask.size();
35502 scaleShuffleMask<int>(Scale, Mask, ScaledMask);
35503 Mask = std::move(ScaledMask);
35504 } else if ((Mask.size() % NumSrcElts) == 0) {
35505 // Simplify Mask based on demanded element.
35506 int ExtractIdx = (int)N->getConstantOperandVal(1);
35507 int Scale = Mask.size() / NumSrcElts;
35508 int Lo = Scale * ExtractIdx;
35509 int Hi = Scale * (ExtractIdx + 1);
35510 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
35511 if (i < Lo || Hi <= i)
35512 Mask[i] = SM_SentinelUndef;
35514 SmallVector<int, 16> WidenedMask;
35515 while (Mask.size() > NumSrcElts &&
35516 canWidenShuffleElements(Mask, WidenedMask))
35517 Mask = std::move(WidenedMask);
35518 // TODO - investigate support for wider shuffle masks with known upper
35519 // undef/zero elements for implicit zero-extension.
35523 // Check if narrowing/widening failed.
35524 if (Mask.size() != NumSrcElts)
35527 int SrcIdx = Mask[N->getConstantOperandVal(1)];
35530 // If the shuffle source element is undef/zero then we can just accept it.
35531 if (SrcIdx == SM_SentinelUndef)
35532 return DAG.getUNDEF(VT);
35534 if (SrcIdx == SM_SentinelZero)
35535 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
35536 : DAG.getConstant(0, dl, VT);
35538 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
35539 SrcIdx = SrcIdx % Mask.size();
35541 // We can only extract other elements from 128-bit vectors and in certain
35542 // circumstances, depending on SSE-level.
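  // For example, lane 0 of a v4i32 can be extracted with plain SSE2, while a
  // non-zero lane relies on the SSE4.1 extract instructions below.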
35543 // TODO: Investigate using extract_subvector for larger vectors.
35544 // TODO: Investigate float/double extraction if it will be just stored.
35545 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
35546 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
35547 assert(SrcSVT == VT && "Unexpected extraction type");
35548 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
35549 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
35550 DAG.getIntPtrConstant(SrcIdx, dl));
35553 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
35554 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
35555 assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
35556 "Unexpected extraction type");
35557 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
35558 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
35559 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
35560 DAG.getIntPtrConstant(SrcIdx, dl));
35561 return DAG.getZExtOrTrunc(ExtOp, dl, VT);
35567 /// Extracting a scalar FP value from vector element 0 is free, so extract each
35568 /// operand first, then perform the math as a scalar op.
35569 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
35570 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
35571 SDValue Vec = ExtElt->getOperand(0);
35572 SDValue Index = ExtElt->getOperand(1);
35573 EVT VT = ExtElt->getValueType(0);
35574 EVT VecVT = Vec.getValueType();
35576 // TODO: If this is a unary/expensive/expand op, allow extraction from a
35577 // non-zero element because the shuffle+scalar op will be cheaper?
35578 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
35581 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
35582 // extract, the condition code), so deal with those as a special-case.
35583 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
35584 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
35585 if (OpVT != MVT::f32 && OpVT != MVT::f64)
35588 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
35590 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
35591 Vec.getOperand(0), Index);
35592 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
35593 Vec.getOperand(1), Index);
35594 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
35597 if (VT != MVT::f32 && VT != MVT::f64)
35600 // Vector FP selects don't fit the pattern of FP math ops (because the
35601 // condition has a different type and we have to change the opcode), so deal
35602 // with those here.
35603 // FIXME: This is restricted to pre type legalization by ensuring the setcc
35604 // has i1 elements. If we loosen this we need to convert vector bool to a
35606 if (Vec.getOpcode() == ISD::VSELECT &&
35607 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
35608 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
35609 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
35610 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
35612 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
35613 Vec.getOperand(0).getValueType().getScalarType(),
35614 Vec.getOperand(0), Index);
35615 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
35616 Vec.getOperand(1), Index);
35617 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
35618 Vec.getOperand(2), Index);
35619 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
35622 // TODO: This switch could include FNEG and the x86-specific FP logic ops
35623 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
35624 // missed load folding and fma+fneg combining.
35625 switch (Vec.getOpcode()) {
35626 case ISD::FMA: // Begin 3 operands
35628 case ISD::FADD: // Begin 2 operands
35633 case ISD::FCOPYSIGN:
35636 case ISD::FMINNUM_IEEE:
35637 case ISD::FMAXNUM_IEEE:
35638 case ISD::FMAXIMUM:
35639 case ISD::FMINIMUM:
35642 case ISD::FABS: // Begin 1 operand
35647 case ISD::FNEARBYINT:
35651 case X86ISD::FRSQRT: {
35652 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
35654 SmallVector<SDValue, 4> ExtOps;
35655 for (SDValue Op : Vec->ops())
35656 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
35657 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
35662 llvm_unreachable("All opcodes should return within switch");
35665 /// Try to convert a vector reduction sequence composed of binops and shuffles
35666 /// into horizontal ops.
35667 static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
35668 const X86Subtarget &Subtarget) {
35669 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
35670 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
35671 if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
35673 SDValue Index = ExtElt->getOperand(1);
35674 if (!isNullConstant(Index))
  // TODO: Allow FADD with reduction and/or reassociation and no-signed-zeros.
  ISD::NodeType Opc;
  SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD});
  if (!Rdx)
    return SDValue();

  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = ExtElt->getOperand(0).getValueType();
  if (VecVT.getScalarType() != VT)
    return SDValue();
35688 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
35691 // 256-bit horizontal instructions operate on 128-bit chunks rather than
35692 // across the whole vector, so we need an extract + hop preliminary stage.
35693 // This is the only step where the operands of the hop are not the same value.
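  // For example, a v8f32 reduction first extracts the two v4f32 halves and
  // combines them with one FHADD, then the loop below finishes within 128 bits.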
35694 // TODO: We could extend this to handle 512-bit or even longer vectors.
35695 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
35696 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
35697 unsigned NumElts = VecVT.getVectorNumElements();
35698 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
35699 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
35700 VecVT = EVT::getVectorVT(*DAG.getContext(), VT, NumElts / 2);
    Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Hi, Lo);
  }
  if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
      !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
    return SDValue();
35707 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
35708 assert(Rdx.getValueType() == VecVT && "Unexpected reduction match");
35709 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
35710 for (unsigned i = 0; i != ReductionSteps; ++i)
35711 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
35713 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
35716 /// Detect vector gather/scatter index generation and convert it from being a
35717 /// bunch of shuffles and extracts into a somewhat faster sequence.
35718 /// For i686, the best sequence is apparently storing the value and loading
35719 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
35720 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
35721 TargetLowering::DAGCombinerInfo &DCI,
35722 const X86Subtarget &Subtarget) {
35723 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
35726 SDValue InputVector = N->getOperand(0);
35727 SDValue EltIdx = N->getOperand(1);
35728 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
35730 EVT SrcVT = InputVector.getValueType();
35731 EVT VT = N->getValueType(0);
35732 SDLoc dl(InputVector);
35733 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
35735 if (CIdx && CIdx->getAPIntValue().uge(SrcVT.getVectorNumElements()))
35736 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
35738 // Integer Constant Folding.
35739 if (CIdx && VT.isInteger()) {
35740 APInt UndefVecElts;
35741 SmallVector<APInt, 16> EltBits;
35742 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
35743 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
35744 EltBits, true, false)) {
35745 uint64_t Idx = CIdx->getZExtValue();
35746 if (UndefVecElts[Idx])
35747 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
35748 return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
35753 // TODO - Remove this once we can handle the implicit zero-extension of
35754 // X86ISD::PEXTRW/X86ISD::PEXTRB in:
35755 // XFormVExtractWithShuffleIntoLoad, combineHorizontalPredicateResult and
35756 // combineBasicSADPattern.
35758 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35759 if (TLI.SimplifyDemandedBits(
35760 SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
35761 return SDValue(N, 0);
35765 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
35768 // Detect mmx extraction of all bits as a i64. It works better as a bitcast.
35769 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
35770 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
35771 SDValue MMXSrc = InputVector.getOperand(0);
35773 // The bitcast source is a direct mmx result.
35774 if (MMXSrc.getValueType() == MVT::x86mmx)
35775 return DAG.getBitcast(VT, InputVector);
35778 // Detect mmx to i32 conversion through a v2i32 elt extract.
35779 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
35780 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
35781 SDValue MMXSrc = InputVector.getOperand(0);
35783 // The bitcast source is a direct mmx result.
35784 if (MMXSrc.getValueType() == MVT::x86mmx)
35785 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
  // Check whether this extract is the root of a sum of absolute differences
  // pattern. This has to be done here because we really want it to happen
  // pre-legalization.
  if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
    return SAD;

  // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
  if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
    return Cmp;

  // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
  if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
    return MinMax;

  if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
    return V;

  if (SDValue V = scalarizeExtEltFP(N, DAG))
    return V;
35808 // Attempt to extract a i1 element by using MOVMSK to extract the signbits
35809 // and then testing the relevant element.
35810 if (CIdx && SrcVT.getScalarType() == MVT::i1) {
35811 SmallVector<SDNode *, 16> BoolExtracts;
    auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
      if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          isa<ConstantSDNode>(Use->getOperand(1)) &&
          Use->getValueType(0) == MVT::i1) {
        BoolExtracts.push_back(Use);
        return true;
      }
      return false;
    };
35821 if (all_of(InputVector->uses(), IsBoolExtract) &&
35822 BoolExtracts.size() > 1) {
35823 unsigned NumSrcElts = SrcVT.getVectorNumElements();
35824 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
      if (SDValue BC =
              combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
35827 for (SDNode *Use : BoolExtracts) {
35828 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
35829 unsigned MaskIdx = Use->getConstantOperandVal(1);
35830 APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
35831 SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
35832 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
35833 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
35834 DCI.CombineTo(Use, Res);
35836 return SDValue(N, 0);
35844 /// If a vector select has an operand that is -1 or 0, try to simplify the
35845 /// select to a bitwise logic operation.
35846 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
35848 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
35849 TargetLowering::DAGCombinerInfo &DCI,
35850 const X86Subtarget &Subtarget) {
35851 SDValue Cond = N->getOperand(0);
35852 SDValue LHS = N->getOperand(1);
35853 SDValue RHS = N->getOperand(2);
35854 EVT VT = LHS.getValueType();
35855 EVT CondVT = Cond.getValueType();
35857 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35859 if (N->getOpcode() != ISD::VSELECT)
35862 assert(CondVT.isVector() && "Vector select expects a vector selector!");
35864 // Check if the first operand is all zeros and Cond type is vXi1.
35865 // This situation only applies to avx512.
35866 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
35867 // TODO: Can we assert that both operands are not zeros (because that should
35868 // get simplified at node creation time)?
35869 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
35870 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
35871 if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
35872 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
35873 // Invert the cond to not(cond) : xor(op,allones)=not(op)
35874 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
35875 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
35876 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
35879 // To use the condition operand as a bitwise mask, it must have elements that
  // are the same size as the select elements. I.e., the condition operand must
35881 // have already been promoted from the IR select condition type <N x i1>.
35882 // Don't check if the types themselves are equal because that excludes
35883 // vector floating-point selects.
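  // For example, a v4i32 all-ones/zeros condition can act as a bitwise mask for
  // a v4f32 select, whereas a v4i1 condition cannot.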
  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();
35887 // Try to invert the condition if true value is not all 1s and false value is
35888 // not all 0s. Only do this if the condition has one use.
35889 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
35890 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
35891 // Check if the selector will be produced by CMPP*/PCMP*.
35892 Cond.getOpcode() == ISD::SETCC &&
35893 // Check if SETCC has already been promoted.
35894 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
35896 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
35898 if (TValIsAllZeros || FValIsAllOnes) {
35899 SDValue CC = Cond.getOperand(2);
35900 ISD::CondCode NewCC =
35901 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
35902 Cond.getOperand(0).getValueType().isInteger());
35903 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
35905 std::swap(LHS, RHS);
35906 TValIsAllOnes = FValIsAllOnes;
35907 FValIsAllZeros = TValIsAllZeros;
35911 // Cond value must be 'sign splat' to be converted to a logical op.
35912 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
35915 // vselect Cond, 111..., 000... -> Cond
35916 if (TValIsAllOnes && FValIsAllZeros)
35917 return DAG.getBitcast(VT, Cond);
35919 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
35922 // vselect Cond, 111..., X -> or Cond, X
35923 if (TValIsAllOnes) {
35924 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
35925 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
35926 return DAG.getBitcast(VT, Or);
35929 // vselect Cond, X, 000... -> and Cond, X
35930 if (FValIsAllZeros) {
35931 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
35932 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
35933 return DAG.getBitcast(VT, And);
35936 // vselect Cond, 000..., X -> andn Cond, X
35937 if (TValIsAllZeros) {
35938 MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
35939 SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
35940 SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
35941 SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
35942 return DAG.getBitcast(VT, AndN);
35948 /// If both arms of a vector select are concatenated vectors, split the select,
35949 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
35950 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
35951 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
35952 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
35953 const X86Subtarget &Subtarget) {
35954 unsigned Opcode = N->getOpcode();
35955 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
35958 // TODO: Split 512-bit vectors too?
35959 EVT VT = N->getValueType(0);
35960 if (!VT.is256BitVector())
35963 // TODO: Split as long as any 2 of the 3 operands are concatenated?
35964 SDValue Cond = N->getOperand(0);
35965 SDValue TVal = N->getOperand(1);
35966 SDValue FVal = N->getOperand(2);
35967 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
35968 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
35969 !collectConcatOps(TVal.getNode(), CatOpsT) ||
35970 !collectConcatOps(FVal.getNode(), CatOpsF))
35973 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
35974 ArrayRef<SDValue> Ops) {
    return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
                          makeBlend, /*CheckBWI*/ false);
}
35981 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
35982 SDValue Cond = N->getOperand(0);
35983 SDValue LHS = N->getOperand(1);
35984 SDValue RHS = N->getOperand(2);
35987 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
35988 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
35989 if (!TrueC || !FalseC)
35992 // Don't do this for crazy integer types.
35993 EVT VT = N->getValueType(0);
35994 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
35997 // We're going to use the condition bit in math or logic ops. We could allow
35998 // this with a wider condition value (post-legalization it becomes an i8),
35999 // but if nothing is creating selects that late, it doesn't matter.
36000 if (Cond.getValueType() != MVT::i1)
36003 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
36004 // 3, 5, or 9 with i32/i64, so those get transformed too.
36005 // TODO: For constants that overflow or do not differ by power-of-2 or small
36006 // multiplier, convert to 'and' + 'add'.
  const APInt &TrueVal = TrueC->getAPIntValue();
  const APInt &FalseVal = FalseC->getAPIntValue();
  bool OV;
  APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
  if (OV)
    return SDValue();

  APInt AbsDiff = Diff.abs();
36015 if (AbsDiff.isPowerOf2() ||
36016 ((VT == MVT::i32 || VT == MVT::i64) &&
36017 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
36019 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
36020 // of the condition can usually be folded into a compare predicate, but even
36021 // without that, the sequence should be cheaper than a CMOV alternative.
    if (TrueVal.slt(FalseVal)) {
      Cond = DAG.getNOT(DL, Cond, MVT::i1);
      std::swap(TrueC, FalseC);
    }
36027 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
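    // For example, select Cond, 7, 3 --> (zext(Cond) * 4) + 3, where the
    // multiply by the power-of-2 difference is later matched as a shift.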
36028 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
    // Multiply condition by the difference if non-one.
    if (!AbsDiff.isOneValue())
      R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));

    // Add the base if non-zero.
    if (!FalseC->isNullValue())
      R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
    return R;
  }

  return SDValue();
}
36044 /// If this is a *dynamic* select (non-constant condition) and we can match
36045 /// this node with one of the variable blend instructions, restructure the
36046 /// condition so that blends can use the high (sign) bit of each element.
36047 /// This function will also call SimplifyDemandedBits on already created
36048 /// BLENDV to perform additional simplifications.
36049 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
36050 TargetLowering::DAGCombinerInfo &DCI,
36051 const X86Subtarget &Subtarget) {
36052 SDValue Cond = N->getOperand(0);
36053 if ((N->getOpcode() != ISD::VSELECT &&
36054 N->getOpcode() != X86ISD::BLENDV) ||
36055 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
36058 // Don't optimize before the condition has been transformed to a legal type
36059 // and don't ever optimize vector selects that map to AVX512 mask-registers.
36060 unsigned BitWidth = Cond.getScalarValueSizeInBits();
36061 if (BitWidth < 8 || BitWidth > 64)
36064 // We can only handle the cases where VSELECT is directly legal on the
36065 // subtarget. We custom lower VSELECT nodes with constant conditions and
36066 // this makes it hard to see whether a dynamic VSELECT will correctly
36067 // lower, so we both check the operation's status and explicitly handle the
36068 // cases where a *dynamic* blend will fail even though a constant-condition
36069 // blend could be custom lowered.
36070 // FIXME: We should find a better way to handle this class of problems.
36071 // Potentially, we should combine constant-condition vselect nodes
36072 // pre-legalization into shuffles and not mark as many types as custom
36074 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36075 EVT VT = N->getValueType(0);
36076 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
36078 // FIXME: We don't support i16-element blends currently. We could and
36079 // should support them by making *all* the bits in the condition be set
36080 // rather than just the high bit and using an i8-element blend.
36081 if (VT.getVectorElementType() == MVT::i16)
36083 // Dynamic blending was only available from SSE4.1 onward.
36084 if (VT.is128BitVector() && !Subtarget.hasSSE41())
  // Byte blends are only available in AVX2.
  if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
    return SDValue();
36089 // There are no 512-bit blend instructions that use sign bits.
36090 if (VT.is512BitVector())
  // TODO: Add other opcodes eventually lowered into BLEND.
  for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
       UI != UE; ++UI)
    if ((UI->getOpcode() != ISD::VSELECT &&
         UI->getOpcode() != X86ISD::BLENDV) ||
        UI.getOperandNo() != 0)
      return SDValue();
  APInt DemandedMask(APInt::getSignMask(BitWidth));
  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
    return SDValue();
36108 // If we changed the computation somewhere in the DAG, this change will
36109 // affect all users of Cond. Update all the nodes so that we do not use
36110 // the generic VSELECT anymore. Otherwise, we may perform wrong
36111 // optimizations as we messed with the actual expectation for the vector
36113 for (SDNode *U : Cond->uses()) {
36114 if (U->getOpcode() == X86ISD::BLENDV)
36117 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
36118 Cond, U->getOperand(1), U->getOperand(2));
36119 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
    DCI.AddToWorklist(U);
  }
  DCI.CommitTargetLoweringOpt(TLO);
36123 return SDValue(N, 0);
36126 /// Do target-specific dag combines on SELECT and VSELECT nodes.
36127 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
36128 TargetLowering::DAGCombinerInfo &DCI,
36129 const X86Subtarget &Subtarget) {
36131 SDValue Cond = N->getOperand(0);
36132 SDValue LHS = N->getOperand(1);
36133 SDValue RHS = N->getOperand(2);
36135 // Try simplification again because we use this function to optimize
36136 // BLENDV nodes that are not handled by the generic combiner.
36137 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
36140 EVT VT = LHS.getValueType();
36141 EVT CondVT = Cond.getValueType();
36142 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36144 // Convert vselects with constant condition into shuffles.
36145 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
36146 DCI.isBeforeLegalizeOps()) {
36147 SmallVector<int, 64> Mask;
36148 if (createShuffleMaskFromVSELECT(Mask, Cond))
36149 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
36152 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
36153 // instructions match the semantics of the common C idiom x<y?x:y but not
36154 // x<=y?x:y, because of how they handle negative zero (which can be
36155 // ignored in unsafe-math mode).
36156 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
36157 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
36158 VT != MVT::f80 && VT != MVT::f128 &&
36159 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
36160 (Subtarget.hasSSE2() ||
36161 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
36162 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36164 unsigned Opcode = 0;
36165 // Check for x CC y ? x : y.
36166 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
36167 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
36171 // Converting this to a min would handle NaNs incorrectly, and swapping
36172 // the operands would cause it to handle comparisons between positive
36173 // and negative zero incorrectly.
36174 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36175 if (!DAG.getTarget().Options.UnsafeFPMath &&
36176 !(DAG.isKnownNeverZeroFloat(LHS) ||
36177 DAG.isKnownNeverZeroFloat(RHS)))
36179 std::swap(LHS, RHS);
36181 Opcode = X86ISD::FMIN;
36184 // Converting this to a min would handle comparisons between positive
36185 // and negative zero incorrectly.
36186 if (!DAG.getTarget().Options.UnsafeFPMath &&
36187 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36189 Opcode = X86ISD::FMIN;
36192 // Converting this to a min would handle both negative zeros and NaNs
36193 // incorrectly, but we can swap the operands to fix both.
36194 std::swap(LHS, RHS);
36199 Opcode = X86ISD::FMIN;
36203 // Converting this to a max would handle comparisons between positive
36204 // and negative zero incorrectly.
36205 if (!DAG.getTarget().Options.UnsafeFPMath &&
36206 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36208 Opcode = X86ISD::FMAX;
36211 // Converting this to a max would handle NaNs incorrectly, and swapping
36212 // the operands would cause it to handle comparisons between positive
36213 // and negative zero incorrectly.
36214 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36215 if (!DAG.getTarget().Options.UnsafeFPMath &&
36216 !(DAG.isKnownNeverZeroFloat(LHS) ||
36217 DAG.isKnownNeverZeroFloat(RHS)))
36219 std::swap(LHS, RHS);
36221 Opcode = X86ISD::FMAX;
36224 // Converting this to a max would handle both negative zeros and NaNs
36225 // incorrectly, but we can swap the operands to fix both.
36226 std::swap(LHS, RHS);
36231 Opcode = X86ISD::FMAX;
36234 // Check for x CC y ? y : x -- a min/max with reversed arms.
36235 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
36236 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
36240 // Converting this to a min would handle comparisons between positive
36241 // and negative zero incorrectly, and swapping the operands would
36242 // cause it to handle NaNs incorrectly.
36243 if (!DAG.getTarget().Options.UnsafeFPMath &&
36244 !(DAG.isKnownNeverZeroFloat(LHS) ||
36245 DAG.isKnownNeverZeroFloat(RHS))) {
36246 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36248 std::swap(LHS, RHS);
36250 Opcode = X86ISD::FMIN;
36253 // Converting this to a min would handle NaNs incorrectly.
36254 if (!DAG.getTarget().Options.UnsafeFPMath &&
36255 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
36257 Opcode = X86ISD::FMIN;
36260 // Converting this to a min would handle both negative zeros and NaNs
36261 // incorrectly, but we can swap the operands to fix both.
36262 std::swap(LHS, RHS);
36267 Opcode = X86ISD::FMIN;
36271 // Converting this to a max would handle NaNs incorrectly.
36272 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36274 Opcode = X86ISD::FMAX;
36277 // Converting this to a max would handle comparisons between positive
36278 // and negative zero incorrectly, and swapping the operands would
36279 // cause it to handle NaNs incorrectly.
36280 if (!DAG.getTarget().Options.UnsafeFPMath &&
36281 !DAG.isKnownNeverZeroFloat(LHS) &&
36282 !DAG.isKnownNeverZeroFloat(RHS)) {
36283 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36285 std::swap(LHS, RHS);
36287 Opcode = X86ISD::FMAX;
36290 // Converting this to a max would handle both negative zeros and NaNs
36291 // incorrectly, but we can swap the operands to fix both.
36292 std::swap(LHS, RHS);
36297 Opcode = X86ISD::FMAX;
36303 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
36306 // Some mask scalar intrinsics rely on checking if only one bit is set
36307 // and implement it in C code like this:
36308 // A[0] = (U & 1) ? A[0] : W[0];
36309 // This creates some redundant instructions that break pattern matching.
36310 // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
36311 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
36312 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
36313 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36314 SDValue AndNode = Cond.getOperand(0);
36315 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
36316 isNullConstant(Cond.getOperand(1)) &&
36317 isOneConstant(AndNode.getOperand(1))) {
36318 // LHS and RHS swapped due to
36319 // setcc outputting 1 when AND resulted in 0 and vice versa.
36320 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
36321 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
  // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
  // lowering on KNL. In this case we convert it to
  // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
  // The same situation applies to all vectors of i8 and i16 without BWI.
  // Make sure we extend these even before type legalization gets a chance to
  // split wide vectors.
  // Since SKX, these selects have a proper lowering.
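  // For example, a v16i8 vselect with a v16i1 condition is widened here to use
  // a v16i8 condition so it can be lowered as a variable byte blend.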
36332 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
36333 CondVT.getVectorElementType() == MVT::i1 &&
36334 (ExperimentalVectorWideningLegalization ||
36335 VT.getVectorNumElements() > 4) &&
36336 (VT.getVectorElementType() == MVT::i8 ||
36337 VT.getVectorElementType() == MVT::i16)) {
36338 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
36339 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
36342 // AVX512 - Extend select with zero to merge with target shuffle.
36343 // select(mask, extract_subvector(shuffle(x)), zero) -->
36344 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
36345 // TODO - support non target shuffles as well.
36346 if (Subtarget.hasAVX512() && CondVT.isVector() &&
36347 CondVT.getVectorElementType() == MVT::i1) {
36348 auto SelectableOp = [&TLI](SDValue Op) {
36349 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
36350 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
36351 isNullConstant(Op.getOperand(1)) &&
36352 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
36353 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
36356 bool SelectableLHS = SelectableOp(LHS);
36357 bool SelectableRHS = SelectableOp(RHS);
36358 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
36359 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
36361 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
36362 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
36363 : RHS.getOperand(0).getValueType();
36364 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36365 EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
36366 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
36367 VT.getSizeInBits());
36368 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
36369 VT.getSizeInBits());
36370 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
36371 DAG.getUNDEF(SrcCondVT), Cond,
36372 DAG.getIntPtrConstant(0, DL));
36373 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
36374 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
36378 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
36381 // Canonicalize max and min:
36382 // (x > y) ? x : y -> (x >= y) ? x : y
36383 // (x < y) ? x : y -> (x <= y) ? x : y
36384 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
36385 // the need for an extra compare
36386 // against zero. e.g.
36387 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
36389 // testl %edi, %edi
36391 // cmovgl %edi, %eax
36395 // cmovsl %eax, %edi
36396 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
36397 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
36398 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
36399 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36404 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
36405 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
36406 Cond.getOperand(0), Cond.getOperand(1), NewCC);
36407 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
36412 // Match VSELECTs into subs with unsigned saturation.
36413 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
36414 // psubus is available in SSE2 for i8 and i16 vectors.
36415 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
36416 isPowerOf2_32(VT.getVectorNumElements()) &&
36417 (VT.getVectorElementType() == MVT::i8 ||
36418 VT.getVectorElementType() == MVT::i16)) {
36419 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
    // left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
      Other = LHS;
    }
36431 if (Other.getNode() && Other->getNumOperands() == 2 &&
36432 Other->getOperand(0) == Cond.getOperand(0)) {
36433 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
36434 SDValue CondRHS = Cond->getOperand(1);
36436 // Look for a general sub with unsigned saturation first.
36437 // x >= y ? x-y : 0 --> subus x, y
36438 // x > y ? x-y : 0 --> subus x, y
36439 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
36440 Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
36441 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
36443 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
36444 if (isa<BuildVectorSDNode>(CondRHS)) {
36445 // If the RHS is a constant we have to reverse the const
36446 // canonicalization.
36447 // x > C-1 ? x+-C : 0 --> subus x, C
36448 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
                return (!Op && !Cond) ||
                       (Op && Cond &&
                        Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
              };
36453 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
36454 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
36455 /*AllowUndefs*/ true)) {
36456 OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
36458 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
36461 // Another special case: If C was a sign bit, the sub has been
36462 // canonicalized into a xor.
36463 // FIXME: Would it be better to use computeKnownBits to determine
36464 // whether it's safe to decanonicalize the xor?
36465 // x s< 0 ? x^C : 0 --> subus x, C
36466 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
36467 if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
36468 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
36469 OpRHSConst->getAPIntValue().isSignMask()) {
36470 // Note that we have to rebuild the RHS constant here to ensure we
36471 // don't rely on particular values of undef lanes.
36472 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
36473 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
36481 // Match VSELECTs into add with unsigned saturation.
36482 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
36483 // paddus is available in SSE2 for i8 and i16 vectors.
36484 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
36485 isPowerOf2_32(VT.getVectorNumElements()) &&
36486 (VT.getVectorElementType() == MVT::i8 ||
36487 VT.getVectorElementType() == MVT::i16)) {
36488 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36490 SDValue CondLHS = Cond->getOperand(0);
36491 SDValue CondRHS = Cond->getOperand(1);
    // Check if one of the arms of the VSELECT is a vector with all bits set.
    // If it's on the left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
      Other = LHS;
    }
36503 if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
36504 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
36506 // Canonicalize condition operands.
36507 if (CC == ISD::SETUGE) {
36508 std::swap(CondLHS, CondRHS);
36512 // We can test against either of the addition operands.
36513 // x <= x+y ? x+y : ~0 --> addus x, y
36514 // x+y >= x ? x+y : ~0 --> addus x, y
36515 if (CC == ISD::SETULE && Other == CondRHS &&
36516 (OpLHS == CondLHS || OpRHS == CondLHS))
36517 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
36519 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
36520 CondLHS == OpLHS) {
36521 // If the RHS is a constant we have to reverse the const
36522 // canonicalization.
36523 // x > ~C ? x+C : ~0 --> addus x, C
36524 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
36525 return Cond->getAPIntValue() == ~Op->getAPIntValue();
36527 if (CC == ISD::SETULE &&
36528 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
36529 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
  // Early exit check.
  if (!TLI.isTypeLegal(VT))
    return SDValue();
  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
    return V;
  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
    return V;
  if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
    return V;
36547 // Custom action for SELECT MMX
  if (VT == MVT::x86mmx) {
    LHS = DAG.getBitcast(MVT::i64, LHS);
    RHS = DAG.getBitcast(MVT::i64, RHS);
    SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
    return DAG.getBitcast(VT, newSelect);
  }

  return SDValue();
}
/// Combine:
///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
/// to:
///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
36562 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
36563 /// Note that this is only legal for some op/cc combinations.
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
36567 // This combine only operates on CMP-like nodes.
36568 if (!(Cmp.getOpcode() == X86ISD::CMP ||
36569 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
36572 // Can't replace the cmp if it has more uses than the one we're looking at.
36573 // FIXME: We would like to be able to handle this, but would need to make sure
36574 // all uses were updated.
36575 if (!Cmp.hasOneUse())
36578 // This only applies to variations of the common case:
36579 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
36580 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
36581 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
36582 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
36583 // Using the proper condcodes (see below), overflow is checked for.
36585 // FIXME: We can generalize both constraints:
36586 // - XOR/OR/AND (if they were made to survive AtomicExpand)
36588 // if the result is compared.
36590 SDValue CmpLHS = Cmp.getOperand(0);
36591 SDValue CmpRHS = Cmp.getOperand(1);
36593 if (!CmpLHS.hasOneUse())
36596 unsigned Opc = CmpLHS.getOpcode();
36597 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
  SDValue OpRHS = CmpLHS.getOperand(2);
  auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
  if (!OpRHSC)
    return SDValue();

  APInt Addend = OpRHSC->getAPIntValue();
  if (Opc == ISD::ATOMIC_LOAD_SUB)
    Addend = -Addend;
36609 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
36613 APInt Comparison = CmpRHSC->getAPIntValue();
36615 // If the addend is the negation of the comparison value, then we can do
36616 // a full comparison by emitting the atomic arithmetic as a locked sub.
36617 if (Comparison == -Addend) {
36618 // The CC is fine, but we need to rewrite the LHS of the comparison as an
36620 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
36621 auto AtomicSub = DAG.getAtomic(
36622 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
36623 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
36624 /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
36625 AN->getMemOperand());
36626 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
36627 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
36628 DAG.getUNDEF(CmpLHS.getValueType()));
36629 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
36630 return LockOp;
36631 }
36633 // We can handle comparisons with zero in a number of cases by manipulating
36634 // the CC used.
36635 if (!Comparison.isNullValue())
36636 return SDValue();
36638 if (CC == X86::COND_S && Addend == 1)
36639 CC = X86::COND_LE;
36640 else if (CC == X86::COND_NS && Addend == 1)
36641 CC = X86::COND_G;
36642 else if (CC == X86::COND_G && Addend == -1)
36643 CC = X86::COND_GE;
36644 else if (CC == X86::COND_LE && Addend == -1)
36645 CC = X86::COND_L;
36646 else
36647 return SDValue();
36649 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
36650 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
36651 DAG.getUNDEF(CmpLHS.getValueType()));
36652 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
36653 return LockOp;
36654 }
36656 // Check whether a boolean test is testing a boolean value generated by
36657 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
36658 // code.
36659 //
36660 // Simplify the following patterns:
36661 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
36662 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
36663 // to (Op EFLAGS Cond)
36664 //
36665 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
36666 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
36667 // to (Op EFLAGS !Cond)
36668 //
36669 // where Op could be BRCOND or CMOV.
36671 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
36672 // This combine only operates on CMP-like nodes.
36673 if (!(Cmp.getOpcode() == X86ISD::CMP ||
36674 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
36677 // Quit if not used as a boolean value.
36678 if (CC != X86::COND_E && CC != X86::COND_NE)
36681 // Check CMP operands. One of them should be 0 or 1 and the other should be
36682 // a SetCC or a value extended from it.
36683 SDValue Op1 = Cmp.getOperand(0);
36684 SDValue Op2 = Cmp.getOperand(1);
36686 SDValue SetCC;
36687 const ConstantSDNode* C = nullptr;
36688 bool needOppositeCond = (CC == X86::COND_E);
36689 bool checkAgainstTrue = false; // Is it a comparison against 1?
36691 if ((C = dyn_cast<ConstantSDNode>(Op1)))
36692 SetCC = Op2;
36693 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
36694 SetCC = Op1;
36695 else // Quit if all operands are not constants.
36696 return SDValue();
36698 if (C->getZExtValue() == 1) {
36699 needOppositeCond = !needOppositeCond;
36700 checkAgainstTrue = true;
36701 } else if (C->getZExtValue() != 0)
36702 // Quit if the constant is neither 0 nor 1.
36703 return SDValue();
36705 bool truncatedToBoolWithAnd = false;
36706 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
36707 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
36708 SetCC.getOpcode() == ISD::TRUNCATE ||
36709 SetCC.getOpcode() == ISD::AND) {
36710 if (SetCC.getOpcode() == ISD::AND) {
36711 int OpIdx = -1;
36712 if (isOneConstant(SetCC.getOperand(0)))
36713 OpIdx = 1;
36714 if (isOneConstant(SetCC.getOperand(1)))
36715 OpIdx = 0;
36716 if (OpIdx < 0)
36717 break;
36718 SetCC = SetCC.getOperand(OpIdx);
36719 truncatedToBoolWithAnd = true;
36720 } else
36721 SetCC = SetCC.getOperand(0);
36722 }
36724 switch (SetCC.getOpcode()) {
36725 case X86ISD::SETCC_CARRY:
36726 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
36727 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
36728 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
36729 // truncated to i1 using 'and'.
36730 if (checkAgainstTrue && !truncatedToBoolWithAnd)
36731 return SDValue();
36732 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
36733 "Invalid use of SETCC_CARRY!");
36734 LLVM_FALLTHROUGH;
36735 case X86ISD::SETCC:
36736 // Set the condition code or opposite one if necessary.
36737 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
36738 if (needOppositeCond)
36739 CC = X86::GetOppositeBranchCondition(CC);
36740 return SetCC.getOperand(1);
36741 case X86ISD::CMOV: {
36742 // Check whether false/true value has canonical one, i.e. 0 or 1.
36743 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
36744 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
36745 // Quit if true value is not a constant.
36746 if (!TVal)
36747 return SDValue();
36748 // Quit if false value is not a constant.
36749 if (!FVal) {
36750 SDValue Op = SetCC.getOperand(0);
36751 // Skip 'zext' or 'trunc' node.
36752 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
36753 Op.getOpcode() == ISD::TRUNCATE)
36754 Op = Op.getOperand(0);
36755 // A special case for rdrand/rdseed, where 0 is set if false cond is
36756 // found.
36757 if ((Op.getOpcode() != X86ISD::RDRAND &&
36758 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
36759 return SDValue();
36760 }
36761 // Quit if false value is not the constant 0 or 1.
36762 bool FValIsFalse = true;
36763 if (FVal && FVal->getZExtValue() != 0) {
36764 if (FVal->getZExtValue() != 1)
36765 return SDValue();
36766 // If FVal is 1, opposite cond is needed.
36767 needOppositeCond = !needOppositeCond;
36768 FValIsFalse = false;
36769 }
36770 // Quit if TVal is not the constant opposite of FVal.
36771 if (FValIsFalse && TVal->getZExtValue() != 1)
36772 return SDValue();
36773 if (!FValIsFalse && TVal->getZExtValue() != 0)
36774 return SDValue();
36775 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
36776 if (needOppositeCond)
36777 CC = X86::GetOppositeBranchCondition(CC);
36778 return SetCC.getOperand(3);
36779 }
36780 }
36782 return SDValue();
36783 }
36785 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
36786 /// Match:
36787 /// (X86or (X86setcc) (X86setcc))
36788 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
36789 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
36790 X86::CondCode &CC1, SDValue &Flags,
36791 bool &isAnd) {
36792 if (Cond->getOpcode() == X86ISD::CMP) {
36793 if (!isNullConstant(Cond->getOperand(1)))
36794 return false;
36796 Cond = Cond->getOperand(0);
36797 }
36799 isAnd = false;
36801 SDValue SetCC0, SetCC1;
36802 switch (Cond->getOpcode()) {
36803 default: return false;
36804 case ISD::AND:
36805 case X86ISD::AND:
36806 isAnd = true;
36807 LLVM_FALLTHROUGH;
36808 case ISD::OR:
36809 case X86ISD::OR:
36810 SetCC0 = Cond->getOperand(0);
36811 SetCC1 = Cond->getOperand(1);
36812 break;
36813 }
36815 // Make sure we have SETCC nodes, using the same flags value.
36816 if (SetCC0.getOpcode() != X86ISD::SETCC ||
36817 SetCC1.getOpcode() != X86ISD::SETCC ||
36818 SetCC0->getOperand(1) != SetCC1->getOperand(1))
36819 return false;
36821 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
36822 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
36823 Flags = SetCC0->getOperand(1);
36824 return true;
36825 }
36827 // When legalizing carry, we create carries via add X, -1.
36828 // If that comes from an actual carry, via setcc, we use the
36829 // carry directly.
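// Worked example (illustrative): with Carry = setcc(COND_B) in {0, 1},
// "add Carry, -1" produces CF == Carry, so users of the ADD's flags can be
// pointed at the EFLAGS of the node that produced Carry instead.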
36830 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
36831 if (EFLAGS.getOpcode() == X86ISD::ADD) {
36832 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
36833 SDValue Carry = EFLAGS.getOperand(0);
36834 while (Carry.getOpcode() == ISD::TRUNCATE ||
36835 Carry.getOpcode() == ISD::ZERO_EXTEND ||
36836 Carry.getOpcode() == ISD::SIGN_EXTEND ||
36837 Carry.getOpcode() == ISD::ANY_EXTEND ||
36838 (Carry.getOpcode() == ISD::AND &&
36839 isOneConstant(Carry.getOperand(1))))
36840 Carry = Carry.getOperand(0);
36841 if (Carry.getOpcode() == X86ISD::SETCC ||
36842 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
36843 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
36844 uint64_t CarryCC = Carry.getConstantOperandVal(0);
36845 SDValue CarryOp1 = Carry.getOperand(1);
36846 if (CarryCC == X86::COND_B)
36847 return CarryOp1;
36848 if (CarryCC == X86::COND_A) {
36849 // Try to convert COND_A into COND_B in an attempt to facilitate
36850 // materializing "setb reg".
36851 //
36852 // Do not flip "e > c", where "c" is a constant, because the CMP
36853 // instruction cannot take an immediate as its first operand.
36854 //
36855 if (CarryOp1.getOpcode() == X86ISD::SUB &&
36856 CarryOp1.getNode()->hasOneUse() &&
36857 CarryOp1.getValueType().isInteger() &&
36858 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
36859 SDValue SubCommute =
36860 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
36861 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
36862 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
36863 }
36864 }
36865 // If this is a check of the z flag of an add with 1, switch to the
36866 // C flag.
36867 if (CarryCC == X86::COND_E &&
36868 CarryOp1.getOpcode() == X86ISD::ADD &&
36869 isOneConstant(CarryOp1.getOperand(1)))
36870 return CarryOp1;
36871 }
36872 }
36873 }
36875 return SDValue();
36876 }
36878 /// Optimize an EFLAGS definition used according to the condition code \p CC
36879 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
36880 /// uses of chain values.
36881 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
36882 SelectionDAG &DAG,
36883 const X86Subtarget &Subtarget) {
36884 if (CC == X86::COND_B)
36885 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
36886 return Flags;
36888 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
36889 return R;
36890 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
36891 }
36893 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
36894 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
36895 TargetLowering::DAGCombinerInfo &DCI,
36896 const X86Subtarget &Subtarget) {
36897 SDLoc DL(N);
36899 SDValue FalseOp = N->getOperand(0);
36900 SDValue TrueOp = N->getOperand(1);
36901 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
36902 SDValue Cond = N->getOperand(3);
36904 // cmov X, X, ?, ? --> X
36905 if (TrueOp == FalseOp)
36906 return TrueOp;
36908 // Try to simplify the EFLAGS and condition code operands.
36909 // We can't always do this as FCMOV only supports a subset of X86 cond.
36910 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
36911 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
36912 SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
36913 Flags};
36914 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
36915 }
36916 }
36918 // If this is a select between two integer constants, try to do some
36919 // optimizations. Note that the operands are ordered the opposite of SELECT
36920 // operands.
36921 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
36922 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
36923 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
36924 // larger than FalseC (the false value).
36925 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
36926 CC = X86::GetOppositeBranchCondition(CC);
36927 std::swap(TrueC, FalseC);
36928 std::swap(TrueOp, FalseOp);
36929 }
36931 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
36932 // This is efficient for any integer data type (including i8/i16) and
36933 // shift amount.
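// Worked example (illustrative): "Cond ? 8 : 0" becomes
//   setcc %al ; movzbl %al, %eax ; shll $3, %eax
// avoiding a branch or a cmov with materialized constants.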
36934 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
36935 Cond = getSETCC(CC, Cond, DL, DAG);
36937 // Zero extend the condition if needed.
36938 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
36940 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
36941 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
36942 DAG.getConstant(ShAmt, DL, MVT::i8));
36943 return Cond;
36944 }
36946 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
36947 // for any integer data type, including i8/i16.
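// Worked example (illustrative): "Cond ? 5 : 4" becomes
//   setcc %al ; movzbl %al, %eax ; addl $4, %eax
// i.e. zext(setcc) + 4.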
36948 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
36949 Cond = getSETCC(CC, Cond, DL, DAG);
36951 // Zero extend the condition if needed.
36952 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
36953 FalseC->getValueType(0), Cond);
36954 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
36955 SDValue(FalseC, 0));
36956 return Cond;
36957 }
36959 // Optimize cases that will turn into an LEA instruction. This requires
36960 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
36961 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
36962 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
36963 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
36965 bool isFastMultiplier = false;
36966 if (Diff < 10) {
36967 switch ((unsigned char)Diff) {
36968 default: break;
36969 case 1: // result = add base, cond
36970 case 2: // result = lea base( , cond*2)
36971 case 3: // result = lea base(cond, cond*2)
36972 case 4: // result = lea base( , cond*4)
36973 case 5: // result = lea base(cond, cond*4)
36974 case 8: // result = lea base( , cond*8)
36975 case 9: // result = lea base(cond, cond*8)
36976 isFastMultiplier = true;
36977 break;
36978 }
36979 }
36981 if (isFastMultiplier) {
36982 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
36983 Cond = getSETCC(CC, Cond, DL ,DAG);
36984 // Zero extend the condition if needed.
36985 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
36986 Cond);
36987 // Scale the condition by the difference.
36988 if (Diff != 1)
36989 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
36990 DAG.getConstant(Diff, DL, Cond.getValueType()));
36992 // Add the base if non-zero.
36993 if (FalseC->getAPIntValue() != 0)
36994 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
36995 SDValue(FalseC, 0));
36996 return Cond;
36997 }
36998 }
36999 }
37000 }
37002 // Handle these cases:
37003 // (select (x != c), e, c) -> (select (x != c), e, x)
37004 // (select (x == c), c, e) -> (select (x == c), x, e)
37005 // where c is an integer constant, and the "select" is the combination
37006 // of CMOV and CMP.
37007 //
37008 // The rationale for this change is that the conditional-move from a constant
37009 // needs two instructions, whereas a conditional-move from a register needs
37010 // only one instruction.
37011 //
37012 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
37013 // some instruction-combining opportunities. This optimization needs to be
37014 // postponed as late as possible.
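// Worked example (illustrative): for "y = (x == 7) ? 7 : e", the true value 7
// equals the compared-against constant, so the CMOV can read x's register
// directly instead of spending an instruction materializing the constant 7.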
37016 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
37017 // the DCI.xxxx conditions are provided to postpone the optimization as
37018 // late as possible.
37020 ConstantSDNode *CmpAgainst = nullptr;
37021 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
37022 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
37023 !isa<ConstantSDNode>(Cond.getOperand(0))) {
37025 if (CC == X86::COND_NE &&
37026 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
37027 CC = X86::GetOppositeBranchCondition(CC);
37028 std::swap(TrueOp, FalseOp);
37029 }
37031 if (CC == X86::COND_E &&
37032 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
37033 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
37034 DAG.getConstant(CC, DL, MVT::i8), Cond };
37035 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37036 }
37037 }
37038 }
37040 // Fold and/or of setcc's to double CMOV:
37041 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
37042 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
37043 //
37044 // This combine lets us generate:
37045 // cmovcc1 (jcc1 if we don't have CMOV)
37046 // cmovcc2 (same)
37047 // instead of:
37048 // setcc1
37049 // setcc2
37050 // and/or
37051 // cmovne (jne if we don't have CMOV)
37052 // When we can't use the CMOV instruction, it might increase branch
37053 // mispredicts.
37054 // When we can use CMOV, or when there is no mispredict, this improves
37055 // throughput and reduces register pressure.
37056 //
37057 if (CC == X86::COND_NE) {
37058 SDValue Flags;
37059 X86::CondCode CC0, CC1;
37060 bool isAndSetCC;
37061 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
37062 if (isAndSetCC) {
37063 std::swap(FalseOp, TrueOp);
37064 CC0 = X86::GetOppositeBranchCondition(CC0);
37065 CC1 = X86::GetOppositeBranchCondition(CC1);
37066 }
37068 SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
37069 Flags};
37070 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
37071 SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
37072 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37073 return CMOV;
37074 }
37075 }
37077 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
37078 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
37079 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
37080 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
37081 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
37082 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
37083 SDValue Add = TrueOp;
37084 SDValue Const = FalseOp;
37085 // Canonicalize the condition code for easier matching and output.
37086 if (CC == X86::COND_E)
37087 std::swap(Add, Const);
37089 // We might have replaced the constant in the cmov with the LHS of the
37090 // compare. If so change it to the RHS of the compare.
37091 if (Const == Cond.getOperand(0))
37092 Const = Cond.getOperand(1);
37094 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
37095 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
37096 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
37097 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
37098 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
37099 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
37100 EVT VT = N->getValueType(0);
37101 // This should constant fold.
37102 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
37103 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
37104 DAG.getConstant(X86::COND_NE, DL, MVT::i8),
37105 Cond);
37106 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
37107 }
37108 }
37110 return SDValue();
37111 }
37113 /// Different mul shrinking modes.
37114 enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
37116 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
37117 EVT VT = N->getOperand(0).getValueType();
37118 if (VT.getScalarSizeInBits() != 32)
37119 return false;
37121 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
37122 unsigned SignBits[2] = {1, 1};
37123 bool IsPositive[2] = {false, false};
37124 for (unsigned i = 0; i < 2; i++) {
37125 SDValue Opd = N->getOperand(i);
37127 SignBits[i] = DAG.ComputeNumSignBits(Opd);
37128 IsPositive[i] = DAG.SignBitIsZero(Opd);
37129 }
37131 bool AllPositive = IsPositive[0] && IsPositive[1];
37132 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
37133 // When ranges are from -128 ~ 127, use MULS8 mode.
37134 if (MinSignBits >= 25)
37135 Mode = MULS8;
37136 // When ranges are from 0 ~ 255, use MULU8 mode.
37137 else if (AllPositive && MinSignBits >= 24)
37138 Mode = MULU8;
37139 // When ranges are from -32768 ~ 32767, use MULS16 mode.
37140 else if (MinSignBits >= 17)
37141 Mode = MULS16;
37142 // When ranges are from 0 ~ 65535, use MULU16 mode.
37143 else if (AllPositive && MinSignBits >= 16)
37144 Mode = MULU16;
37145 else
37146 return false;
37147 return true;
37148 }
37150 /// When the operands of vector mul are extended from smaller size values,
37151 /// like i8 and i16, the type of mul may be shrunk to generate more
37152 /// efficient code. Two typical patterns are handled:
37153 /// Pattern1:
37154 /// %2 = sext/zext <N x i8> %1 to <N x i32>
37155 /// %4 = sext/zext <N x i8> %3 to <N x i32>
37156 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
37157 /// %5 = mul <N x i32> %2, %4
37158 ///
37159 /// Pattern2:
37160 /// %2 = zext/sext <N x i16> %1 to <N x i32>
37161 /// %4 = zext/sext <N x i16> %3 to <N x i32>
37162 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
37163 /// %5 = mul <N x i32> %2, %4
37164 ///
37165 /// There are four mul shrinking modes:
37166 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
37167 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
37168 /// generate pmullw+sext32 for it (MULS8 mode).
37169 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
37170 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
37171 /// generate pmullw+zext32 for it (MULU8 mode).
37172 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
37173 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
37174 /// generate pmullw+pmulhw for it (MULS16 mode).
37175 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
37176 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
37177 /// generate pmullw+pmulhuw for it (MULU16 mode).
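// Worked example (illustrative): for
//   %a = sext <8 x i8> %x to <8 x i32>
//   %b = sext <8 x i8> %y to <8 x i32>
//   %m = mul <8 x i32> %a, %b
// MULS8 mode applies (>= 25 sign bits per operand), so %m can be computed as
// a pmullw on <8 x i16> followed by a sign extension of the result.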
37178 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
37179 const X86Subtarget &Subtarget) {
37180 // Check for legality.
37181 // pmullw/pmulhw are only available with SSE2.
37182 if (!Subtarget.hasSSE2())
37183 return SDValue();
37185 // Check for profitability
37186 // pmulld is supported since SSE4.1. It is better to use pmulld
37187 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
37188 // the expanded version.
37189 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
37190 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
37191 return SDValue();
37193 ShrinkMode Mode;
37194 if (!canReduceVMulWidth(N, DAG, Mode))
37195 return SDValue();
37197 SDLoc DL(N);
37198 SDValue N0 = N->getOperand(0);
37199 SDValue N1 = N->getOperand(1);
37200 EVT VT = N->getOperand(0).getValueType();
37201 unsigned NumElts = VT.getVectorNumElements();
37202 if ((NumElts % 2) != 0)
37203 return SDValue();
37205 unsigned RegSize = 128;
37206 MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
37207 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
37209 // Shrink the operands of mul.
37210 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
37211 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
37213 if (ExperimentalVectorWideningLegalization ||
37214 NumElts >= OpsVT.getVectorNumElements()) {
37215 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
37216 // lower part is needed.
37217 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
37218 if (Mode == MULU8 || Mode == MULS8)
37219 return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
37220 DL, VT, MulLo);
37222 MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
37223 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
37224 // the higher part is also needed.
37225 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
37226 ReducedVT, NewN0, NewN1);
37228 // Repack the lower part and higher part result of mul into a wider
37229 // result.
37230 // Generate shuffle functioning as punpcklwd.
37231 SmallVector<int, 16> ShuffleMask(NumElts);
37232 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37233 ShuffleMask[2 * i] = i;
37234 ShuffleMask[2 * i + 1] = i + NumElts;
37235 }
37236 SDValue ResLo =
37237 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37238 ResLo = DAG.getBitcast(ResVT, ResLo);
37239 // Generate shuffle functioning as punpckhwd.
37240 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37241 ShuffleMask[2 * i] = i + NumElts / 2;
37242 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
37243 }
37244 SDValue ResHi =
37245 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37246 ResHi = DAG.getBitcast(ResVT, ResHi);
37247 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
37248 }
37250 // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
37251 // to legalize the mul explicitly because implicit legalization for type
37252 // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
37253 // instructions which will not exist when we explicitly legalize it by
37254 // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
37255 // <4 x i16> undef).
37257 // Legalize the operands of mul.
37258 // FIXME: We may be able to handle non-concatenated vectors by insertion.
37259 unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
37260 if ((RegSize % ReducedSizeInBits) != 0)
37261 return SDValue();
37263 SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
37264 DAG.getUNDEF(ReducedVT));
37265 Ops[0] = NewN0;
37266 NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
37267 Ops[0] = NewN1;
37268 NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
37270 if (Mode == MULU8 || Mode == MULS8) {
37271 // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
37272 // part is needed.
37273 SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
37275 // Convert the type of the mul result to VT.
37276 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
37277 SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
37278 : ISD::SIGN_EXTEND_VECTOR_INREG,
37279 DL, ResVT, Mul);
37280 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
37281 DAG.getIntPtrConstant(0, DL));
37282 }
37284 // Generate the lower part (pmullw) and higher part (pmulhw/pmulhuw) of
37285 // the mul. For MULU16/MULS16, both parts are needed.
37286 SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
37287 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
37288 OpsVT, NewN0, NewN1);
37290 // Repack the lower part and higher part result of mul into a wider
37291 // result. Make sure the type of mul result is VT.
37292 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
37293 SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi);
37294 Res = DAG.getBitcast(ResVT, Res);
37295 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
37296 DAG.getIntPtrConstant(0, DL));
37297 }
37299 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
37300 EVT VT, const SDLoc &DL) {
37302 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
37303 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37304 DAG.getConstant(Mult, DL, VT));
37305 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
37306 DAG.getConstant(Shift, DL, MVT::i8));
37307 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37308 N->getOperand(0));
37309 return Result;
37310 };
37312 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
37313 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37314 DAG.getConstant(Mul1, DL, VT));
37315 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
37316 DAG.getConstant(Mul2, DL, VT));
37317 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37318 N->getOperand(0));
37319 return Result;
37320 };
37322 switch (MulAmt) {
37323 default:
37324 break;
37325 case 11:
37326 // mul x, 11 => add ((shl (mul x, 5), 1), x)
37327 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
37328 case 21:
37329 // mul x, 21 => add ((shl (mul x, 5), 2), x)
37330 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
37331 case 41:
37332 // mul x, 41 => add ((shl (mul x, 5), 3), x)
37333 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
37334 case 22:
37335 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
37336 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37337 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
37338 case 19:
37339 // mul x, 19 => add ((shl (mul x, 9), 1), x)
37340 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
37341 case 37:
37342 // mul x, 37 => add ((shl (mul x, 9), 2), x)
37343 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
37344 case 73:
37345 // mul x, 73 => add ((shl (mul x, 9), 3), x)
37346 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
37347 case 13:
37348 // mul x, 13 => add ((shl (mul x, 3), 2), x)
37349 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
37350 case 23:
37351 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
37352 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
37353 case 26:
37354 // mul x, 26 => add ((mul (mul x, 5), 5), x)
37355 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
37356 case 28:
37357 // mul x, 28 => add ((mul (mul x, 9), 3), x)
37358 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
37359 case 29:
37360 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
37361 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37362 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
37363 }
37365 // Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
37366 // by a single LEA.
37367 // First check if this is a sum of two powers of 2 because that's easy. Then
37368 // count the trailing zeros to find the smaller power of 2.
37369 // TODO: We can do this even without LEA at a cost of two shifts and an add.
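// Worked example (illustrative): MulAmt = 20 = 16 + 4 has two set bits, so
//   x * 20 = (x << 4) + (x << 2)
// where ScaleShift = 2 lets the second term come from an LEA scale.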
37370 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
37371 unsigned ScaleShift = countTrailingZeros(MulAmt);
37372 if (ScaleShift >= 1 && ScaleShift < 4) {
37373 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
37374 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37375 DAG.getConstant(ShiftAmt, DL, MVT::i8));
37376 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37377 DAG.getConstant(ScaleShift, DL, MVT::i8));
37378 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
37379 }
37380 }
37382 return SDValue();
37383 }
37385 // If the upper 17 bits of each element are zero then we can use PMADDWD,
37386 // which is always at least as quick as PMULLD, except on KNL.
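// Sketch of the idea (illustrative): if the upper 17 bits of every i32
// element are zero, each 32-bit lane holds a value in [0, 0x7FFF]; viewed as
// pairs of i16 lanes the odd halves are zero, so VPMADDWD computes exactly
// lo16(a) * lo16(b) + 0 * 0 per lane.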
37387 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
37388 const X86Subtarget &Subtarget) {
37389 if (!Subtarget.hasSSE2())
37390 return SDValue();
37392 if (Subtarget.isPMADDWDSlow())
37393 return SDValue();
37395 EVT VT = N->getValueType(0);
37397 // Only support vXi32 vectors.
37398 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
37399 return SDValue();
37401 // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
37402 // Also allow v2i32 if it will be widened.
37403 MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
37404 if (!((ExperimentalVectorWideningLegalization && VT == MVT::v2i32) ||
37405 DAG.getTargetLoweringInfo().isTypeLegal(WVT)))
37406 return SDValue();
37408 SDValue N0 = N->getOperand(0);
37409 SDValue N1 = N->getOperand(1);
37411 // If we are zero extending two steps without SSE4.1, it's better to reduce
37412 // the vmul width instead.
37413 if (!Subtarget.hasSSE41() &&
37414 (N0.getOpcode() == ISD::ZERO_EXTEND &&
37415 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
37416 (N1.getOpcode() == ISD::ZERO_EXTEND &&
37417 N1.getOperand(0).getScalarValueSizeInBits() <= 8))
37418 return SDValue();
37420 APInt Mask17 = APInt::getHighBitsSet(32, 17);
37421 if (!DAG.MaskedValueIsZero(N1, Mask17) ||
37422 !DAG.MaskedValueIsZero(N0, Mask17))
37423 return SDValue();
37425 // Use SplitOpsAndApply to handle AVX splitting.
37426 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
37427 ArrayRef<SDValue> Ops) {
37428 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
37429 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
37430 };
37431 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
37432 { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
37433 PMADDWDBuilder);
37434 }
37436 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
37437 const X86Subtarget &Subtarget) {
37438 if (!Subtarget.hasSSE2())
37439 return SDValue();
37441 EVT VT = N->getValueType(0);
37443 // Only support vXi64 vectors.
37444 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
37445 VT.getVectorNumElements() < 2 ||
37446 !isPowerOf2_32(VT.getVectorNumElements()))
37447 return SDValue();
37449 SDValue N0 = N->getOperand(0);
37450 SDValue N1 = N->getOperand(1);
37452 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
37453 // 32-bits. We can lower with this if the sign bits stretch that far.
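// Illustrative example: for %m = mul <2 x i64> (sext v2i32 %a), (sext v2i32 %b),
// each operand has more than 32 sign bits, so PMULDQ's sign-extended
// 32x32->64 multiply produces the full product directly.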
37454 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
37455 DAG.ComputeNumSignBits(N1) > 32) {
37456 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
37457 ArrayRef<SDValue> Ops) {
37458 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
37459 };
37460 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
37461 PMULDQBuilder, /*CheckBWI*/false);
37462 }
37464 // If the upper bits are zero we can use a single pmuludq.
37465 APInt Mask = APInt::getHighBitsSet(64, 32);
37466 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
37467 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
37468 ArrayRef<SDValue> Ops) {
37469 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
37470 };
37471 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
37472 PMULUDQBuilder, /*CheckBWI*/false);
37473 }
37475 return SDValue();
37476 }
37478 /// Optimize a single multiply with constant into two operations in order to
37479 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
37480 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
37481 TargetLowering::DAGCombinerInfo &DCI,
37482 const X86Subtarget &Subtarget) {
37483 EVT VT = N->getValueType(0);
37485 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
37486 return V;
37488 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
37489 return V;
37491 if (DCI.isBeforeLegalize() && VT.isVector())
37492 return reduceVMULWidth(N, DAG, Subtarget);
37494 if (!MulConstantOptimization)
37495 return SDValue();
37496 // An imul is usually smaller than the alternative sequence.
37497 if (DAG.getMachineFunction().getFunction().hasMinSize())
37498 return SDValue();
37500 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
37501 return SDValue();
37503 if (VT != MVT::i64 && VT != MVT::i32)
37504 return SDValue();
37506 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
37507 if (!C)
37508 return SDValue();
37509 if (isPowerOf2_64(C->getZExtValue()))
37510 return SDValue();
37512 int64_t SignMulAmt = C->getSExtValue();
37513 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
37514 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
37516 SDLoc DL(N);
37517 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
37518 SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37519 DAG.getConstant(AbsMulAmt, DL, VT));
37520 if (SignMulAmt < 0)
37521 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
37522 NewMul);
37524 return NewMul;
37525 }
37527 uint64_t MulAmt1 = 0;
37528 uint64_t MulAmt2 = 0;
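// Worked example (illustrative): AbsMulAmt = 45 factors as 9 * 5, so x * 45
// lowers to two LEAs: lea (x, x, 8) followed by lea (t, t, 4).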
37529 if ((AbsMulAmt % 9) == 0) {
37530 MulAmt1 = 9;
37531 MulAmt2 = AbsMulAmt / 9;
37532 } else if ((AbsMulAmt % 5) == 0) {
37533 MulAmt1 = 5;
37534 MulAmt2 = AbsMulAmt / 5;
37535 } else if ((AbsMulAmt % 3) == 0) {
37536 MulAmt1 = 3;
37537 MulAmt2 = AbsMulAmt / 3;
37538 }
37540 SDValue NewMul;
37541 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
37542 if (MulAmt2 &&
37543 (isPowerOf2_64(MulAmt2) ||
37544 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
37546 if (isPowerOf2_64(MulAmt2) &&
37547 !(SignMulAmt >= 0 && N->hasOneUse() &&
37548 N->use_begin()->getOpcode() == ISD::ADD))
37549 // If the second multiplier is pow2, issue it first. We want the multiply by
37550 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
37551 // is an add. Only do this for positive multiply amounts since the
37552 // negate would prevent it from being used as an address mode anyway.
37553 std::swap(MulAmt1, MulAmt2);
37555 if (isPowerOf2_64(MulAmt1))
37556 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37557 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
37558 else
37559 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37560 DAG.getConstant(MulAmt1, DL, VT));
37562 if (isPowerOf2_64(MulAmt2))
37563 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
37564 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
37565 else
37566 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
37567 DAG.getConstant(MulAmt2, DL, VT));
37569 // Negate the result.
37570 if (SignMulAmt < 0)
37571 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
37572 NewMul);
37573 } else if (!Subtarget.slowLEA())
37574 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
37576 if (!NewMul) {
37577 assert(C->getZExtValue() != 0 &&
37578 C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
37579 "Both cases that could cause potential overflows should have "
37580 "already been handled.");
37581 if (isPowerOf2_64(AbsMulAmt - 1)) {
37582 // (mul x, 2^N + 1) => (add (shl x, N), x)
37583 NewMul = DAG.getNode(
37584 ISD::ADD, DL, VT, N->getOperand(0),
37585 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37586 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
37587 MVT::i8)));
37588 // To negate, subtract the number from zero
37589 if (SignMulAmt < 0)
37590 NewMul = DAG.getNode(ISD::SUB, DL, VT,
37591 DAG.getConstant(0, DL, VT), NewMul);
37592 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
37593 // (mul x, 2^N - 1) => (sub (shl x, N), x)
37594 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37595 DAG.getConstant(Log2_64(AbsMulAmt + 1),
37596 DL, MVT::i8));
37597 // To negate, reverse the operands of the subtract.
37598 if (SignMulAmt < 0)
37599 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
37600 else
37601 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
37602 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
37603 // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
37604 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37605 DAG.getConstant(Log2_64(AbsMulAmt - 2),
37606 DL, MVT::i8));
37607 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
37608 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
37609 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
37610 // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
37611 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37612 DAG.getConstant(Log2_64(AbsMulAmt + 2),
37613 DL, MVT::i8));
37614 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
37615 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
37616 }
37617 }
37619 return NewMul;
37620 }
37622 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
37623 SDValue N0 = N->getOperand(0);
37624 SDValue N1 = N->getOperand(1);
37625 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
37626 EVT VT = N0.getValueType();
37628 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
37629 // since the result of setcc_c is all zero's or all ones.
37630 if (VT.isInteger() && !VT.isVector() &&
37631 N1C && N0.getOpcode() == ISD::AND &&
37632 N0.getOperand(1).getOpcode() == ISD::Constant) {
37633 SDValue N00 = N0.getOperand(0);
37634 APInt Mask = N0.getConstantOperandAPInt(1);
37635 Mask <<= N1C->getAPIntValue();
37636 bool MaskOK = false;
37637 // We can handle cases concerning bit-widening nodes containing setcc_c if
37638 // we carefully interrogate the mask to make sure we are semantics-
37639 // preserving.
37640 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
37641 // of the underlying setcc_c operation if the setcc_c was zero extended.
37642 // Consider the following example:
37643 // zext(setcc_c) -> i32 0x0000FFFF
37644 // c1 -> i32 0x0000FFFF
37645 // c2 -> i32 0x00000001
37646 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
37647 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
37648 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
37649 MaskOK = true;
37650 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
37651 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
37652 MaskOK = true;
37653 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
37654 N00.getOpcode() == ISD::ANY_EXTEND) &&
37655 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
37656 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
37657 }
37658 if (MaskOK && Mask != 0) {
37659 SDLoc DL(N);
37660 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
37661 }
37662 }
37664 // Hardware support for vector shifts is sparse which makes us scalarize the
37665 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
37666 // SHL.
37667 // (shl V, 1) -> add V,V
37668 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
37669 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
37670 assert(N0.getValueType().isVector() && "Invalid vector shift type");
37671 // We shift all of the values by one. In many cases we do not have
37672 // hardware support for this operation. This is better expressed as an ADD
37673 // of two values.
37674 if (N1SplatC->getAPIntValue() == 1)
37675 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
37676 }
37678 return SDValue();
37679 }
37681 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
37682 SDValue N0 = N->getOperand(0);
37683 SDValue N1 = N->getOperand(1);
37684 EVT VT = N0.getValueType();
37685 unsigned Size = VT.getSizeInBits();
37687 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
37688 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
37689 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
37690 // depending on sign of (SarConst - [56,48,32,24,16])
37691 //
37692 // sexts in X86 are MOVs. The MOVs have the same code size
37693 // as above SHIFTs (only SHIFT on 1 has lower code size).
37694 // However the MOVs have 2 advantages to a SHIFT:
37695 // 1. MOVs can write to a register that differs from source
37696 // 2. MOVs accept memory operands
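// Worked example (illustrative): (ashr (shl x, 56), 58) becomes
// (sra (sext_inreg x, i8), 2), i.e. movsbq followed by sarq $2 on x86-64,
// since SarConst(58) - 56 = 2 >= 0 selects the SRA form below.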
37698 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
37699 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
37700 N0.getOperand(1).getOpcode() != ISD::Constant)
37701 return SDValue();
37703 SDValue N00 = N0.getOperand(0);
37704 SDValue N01 = N0.getOperand(1);
37705 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
37706 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
37707 EVT CVT = N1.getValueType();
37709 if (SarConst.isNegative())
37710 return SDValue();
37712 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
37713 unsigned ShiftSize = SVT.getSizeInBits();
37714 // Skip types without a corresponding sext/zext and
37715 // ShlConst values that are not one of [56,48,32,24,16].
37716 if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
37717 continue;
37718 SDLoc DL(N);
37719 SDValue NN =
37720 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
37721 SarConst = SarConst - (Size - ShiftSize);
37722 if (SarConst == 0)
37723 return NN;
37724 else if (SarConst.isNegative())
37725 return DAG.getNode(ISD::SHL, DL, VT, NN,
37726 DAG.getConstant(-SarConst, DL, CVT));
37727 else
37728 return DAG.getNode(ISD::SRA, DL, VT, NN,
37729 DAG.getConstant(SarConst, DL, CVT));
37730 }
37731 return SDValue();
37732 }
37734 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
37735 TargetLowering::DAGCombinerInfo &DCI) {
37736 SDValue N0 = N->getOperand(0);
37737 SDValue N1 = N->getOperand(1);
37738 EVT VT = N0.getValueType();
37740 // Only do this on the last DAG combine as it can interfere with other
37741 // combines.
37742 if (!DCI.isAfterLegalizeDAG())
37743 return SDValue();
37745 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
37746 // TODO: This is a generic DAG combine that became an x86-only combine to
37747 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
37748 // and-not ('andn').
37749 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
37750 return SDValue();
37752 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
37753 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
37754 if (!ShiftC || !AndC)
37755 return SDValue();
37757 // If we can shrink the constant mask below 8-bits or 32-bits, then this
37758 // transform should reduce code size. It may also enable secondary transforms
37759 // from improved known-bits analysis or instruction selection.
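// Worked example (illustrative): "srl (and X, 0x7F0), 4" becomes
// "and (srl X, 4), 0x7F"; the new mask fits in a sign-extended 8-bit
// immediate while the old one needed a wider encoding.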
37760 APInt MaskVal = AndC->getAPIntValue();
37762 // If this can be matched by a zero extend, don't optimize.
37763 if (MaskVal.isMask()) {
37764 unsigned TO = MaskVal.countTrailingOnes();
37765 if (TO >= 8 && isPowerOf2_32(TO))
37766 return SDValue();
37767 }
37769 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
37770 unsigned OldMaskSize = MaskVal.getMinSignedBits();
37771 unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
37772 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
37773 (OldMaskSize > 32 && NewMaskSize <= 32)) {
37774 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
37775 SDLoc DL(N);
37776 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
37777 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
37778 return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
37779 }
37780 return SDValue();
37781 }
37783 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
37784 TargetLowering::DAGCombinerInfo &DCI,
37785 const X86Subtarget &Subtarget) {
37786 unsigned Opcode = N->getOpcode();
37787 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
37788 "Unexpected shift opcode");
37790 EVT VT = N->getValueType(0);
37791 SDValue N0 = N->getOperand(0);
37792 SDValue N1 = N->getOperand(1);
37793 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
37794 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
37795 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
37796 N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
37797 "Unexpected PACKSS/PACKUS input type");
37799 bool IsSigned = (X86ISD::PACKSS == Opcode);
37801 // Constant Folding.
37802 APInt UndefElts0, UndefElts1;
37803 SmallVector<APInt, 32> EltBits0, EltBits1;
37804 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
37805 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
37806 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
37807 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
37808 unsigned NumLanes = VT.getSizeInBits() / 128;
37809 unsigned NumDstElts = VT.getVectorNumElements();
37810 unsigned NumSrcElts = NumDstElts / 2;
37811 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
37812 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
37814 APInt Undefs(NumDstElts, 0);
37815 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
37816 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
37817 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
37818 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
37819 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
37820 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
37822 if (UndefElts[SrcIdx]) {
37823 Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
37824 continue;
37825 }
37827 APInt &Val = EltBits[SrcIdx];
37828 if (IsSigned) {
37829 // PACKSS: Truncate signed value with signed saturation.
37830 // Source values less than dst minint are saturated to minint.
37831 // Source values greater than dst maxint are saturated to maxint.
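// Worked example (illustrative, i16 -> i8): 300 saturates to 127,
// -200 saturates to -128, and 42 truncates to 42.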
37832 if (Val.isSignedIntN(DstBitsPerElt))
37833 Val = Val.trunc(DstBitsPerElt);
37834 else if (Val.isNegative())
37835 Val = APInt::getSignedMinValue(DstBitsPerElt);
37836 else
37837 Val = APInt::getSignedMaxValue(DstBitsPerElt);
37838 } else {
37839 // PACKUS: Truncate signed value with unsigned saturation.
37840 // Source values less than zero are saturated to zero.
37841 // Source values greater than dst maxuint are saturated to maxuint.
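// Worked example (illustrative, i16 -> i8): -5 saturates to 0,
// 300 saturates to 255, and 42 truncates to 42.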
37842 if (Val.isIntN(DstBitsPerElt))
37843 Val = Val.trunc(DstBitsPerElt);
37844 else if (Val.isNegative())
37845 Val = APInt::getNullValue(DstBitsPerElt);
37846 else
37847 Val = APInt::getAllOnesValue(DstBitsPerElt);
37848 }
37849 Bits[Lane * NumDstEltsPerLane + Elt] = Val;
37850 }
37851 }
37853 return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
37854 }
37856 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
37857 // truncate to create a larger truncate.
37858 if (Subtarget.hasAVX512() &&
37859 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
37860 N0.getOperand(0).getValueType() == MVT::v8i32) {
37861 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
37862 (!IsSigned &&
37863 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
37864 if (Subtarget.hasVLX())
37865 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
37867 // Widen input to v16i32 so we can truncate that.
37868 SDLoc dl(N);
37869 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
37870 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
37871 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
37872 }
37873 }
37875 // Attempt to combine as shuffle.
37876 SDValue Op(N, 0);
37877 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
37878 return Res;
37880 return SDValue();
37881 }
37883 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
37884 TargetLowering::DAGCombinerInfo &DCI,
37885 const X86Subtarget &Subtarget) {
37886 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
37887 X86ISD::VSRL == N->getOpcode()) &&
37888 "Unexpected shift opcode");
37889 EVT VT = N->getValueType(0);
37890 SDValue N0 = N->getOperand(0);
37891 SDValue N1 = N->getOperand(1);
37893 // Shift zero -> zero.
37894 if (ISD::isBuildVectorAllZeros(N0.getNode()))
37895 return DAG.getConstant(0, SDLoc(N), VT);
37897 // Detect constant shift amounts.
37898 APInt UndefElts;
37899 SmallVector<APInt, 32> EltBits;
37900 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
37901 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
37902 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
37903 EltBits[0].getZExtValue(), DAG);
37904 }
37906 APInt KnownUndef, KnownZero;
37907 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37908 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
37909 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
37910 KnownZero, DCI))
37911 return SDValue(N, 0);
37913 return SDValue();
37914 }
37916 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
37917 TargetLowering::DAGCombinerInfo &DCI,
37918 const X86Subtarget &Subtarget) {
37919 unsigned Opcode = N->getOpcode();
37920 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
37921 X86ISD::VSRLI == Opcode) &&
37922 "Unexpected shift opcode");
37923 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
37924 EVT VT = N->getValueType(0);
37925 SDValue N0 = N->getOperand(0);
37926 SDValue N1 = N->getOperand(1);
37927 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
37928 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
37929 "Unexpected value type");
37930 assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
37932 // Out of range logical bit shifts are guaranteed to be zero.
37933 // Out of range arithmetic bit shifts splat the sign bit.
37934 unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue();
37935 if (ShiftVal >= NumBitsPerElt) {
37936 if (LogicalShift)
37937 return DAG.getConstant(0, SDLoc(N), VT);
37938 else
37939 ShiftVal = NumBitsPerElt - 1;
37940 }
37942 // Shift N0 by zero -> N0.
37943 if (!ShiftVal)
37944 return N0;
37946 // Shift zero -> zero.
37947 if (ISD::isBuildVectorAllZeros(N0.getNode()))
37948 return DAG.getConstant(0, SDLoc(N), VT);
37950 // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
37951 // clamped to (NumBitsPerElt - 1).
37952 if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
37953 unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
37954 unsigned NewShiftVal = ShiftVal + ShiftVal2;
37955 if (NewShiftVal >= NumBitsPerElt)
37956 NewShiftVal = NumBitsPerElt - 1;
37957 return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
37958 DAG.getConstant(NewShiftVal, SDLoc(N), MVT::i8));
37959 }
37961 // We can decode 'whole byte' logical bit shifts as shuffles.
37962 if (LogicalShift && (ShiftVal % 8) == 0) {
37963 SDValue Op(N, 0);
37964 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
37965 return Res;
37966 }
37968 // Constant Folding.
37969 APInt UndefElts;
37970 SmallVector<APInt, 32> EltBits;
37971 if (N->isOnlyUserOf(N0.getNode()) &&
37972 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
37973 assert(EltBits.size() == VT.getVectorNumElements() &&
37974 "Unexpected shift value type");
37975 for (APInt &Elt : EltBits) {
37976 if (X86ISD::VSHLI == Opcode)
37977 Elt <<= ShiftVal;
37978 else if (X86ISD::VSRAI == Opcode)
37979 Elt.ashrInPlace(ShiftVal);
37980 else
37981 Elt.lshrInPlace(ShiftVal);
37982 }
37983 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
37984 }
37986 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37987 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
37988 APInt::getAllOnesValue(NumBitsPerElt), DCI))
37989 return SDValue(N, 0);
37991 return SDValue();
37992 }
37994 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
37995 TargetLowering::DAGCombinerInfo &DCI,
37996 const X86Subtarget &Subtarget) {
37997 EVT VT = N->getValueType(0);
37998 assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
37999 (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
38000 "Unexpected vector insertion");
38002 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
38003 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38004 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
38005 APInt::getAllOnesValue(NumBitsPerElt), DCI))
38006 return SDValue(N, 0);
38008 // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
38009 SDValue Op(N, 0);
38010 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38011 return Res;
38013 return SDValue();
38014 }
38016 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
38017 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
38018 /// OR -> CMPNEQSS.
38019 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
38020 TargetLowering::DAGCombinerInfo &DCI,
38021 const X86Subtarget &Subtarget) {
38022 unsigned opcode;
38024 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
38025 // we're requiring SSE2 for both.
38026 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
38027 SDValue N0 = N->getOperand(0);
38028 SDValue N1 = N->getOperand(1);
38029 SDValue CMP0 = N0.getOperand(1);
38030 SDValue CMP1 = N1.getOperand(1);
38031 SDLoc DL(N);
38033 // The SETCCs should both refer to the same CMP.
38034 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
38035 return SDValue();
38037 SDValue CMP00 = CMP0->getOperand(0);
38038 SDValue CMP01 = CMP0->getOperand(1);
38039 EVT VT = CMP00.getValueType();
38041 if (VT == MVT::f32 || VT == MVT::f64) {
38042 bool ExpectingFlags = false;
38043 // Check for any users that want flags:
38044 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
38045 !ExpectingFlags && UI != UE; ++UI)
38046 switch (UI->getOpcode()) {
38047 default:
38048 case ISD::BR_CC:
38049 case ISD::BRCOND:
38050 case ISD::SELECT:
38051 ExpectingFlags = true;
38052 break;
38053 case ISD::CopyToReg:
38054 case ISD::SIGN_EXTEND:
38055 case ISD::ZERO_EXTEND:
38056 case ISD::ANY_EXTEND:
38057 break;
38058 }
38060 if (!ExpectingFlags) {
38061 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
38062 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
38064 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
38065 X86::CondCode tmp = cc0;
38066 cc0 = cc1;
38067 cc1 = tmp;
38068 }
38070 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
38071 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
38072 // FIXME: need symbolic constants for these magic numbers.
38073 // See X86ATTInstPrinter.cpp:printSSECC().
38074 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
38075 if (Subtarget.hasAVX512()) {
38076 SDValue FSetCC =
38077 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
38078 DAG.getConstant(x86cc, DL, MVT::i8));
38079 // Need to fill with zeros to ensure the bitcast will produce zeroes
38080 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
38081 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
38082 DAG.getConstant(0, DL, MVT::v16i1),
38083 FSetCC, DAG.getIntPtrConstant(0, DL));
38084 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
38085 N->getSimpleValueType(0));
38086 }
38087 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
38088 CMP00.getValueType(), CMP00, CMP01,
38089 DAG.getConstant(x86cc, DL,
38090 MVT::i8));
38092 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
38093 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
38095 if (is64BitFP && !Subtarget.is64Bit()) {
38096 // On a 32-bit target, we cannot bitcast the 64-bit float to a
38097 // 64-bit integer, since that's not a legal type. Since
38098 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
38099 // bits, but can do this little dance to extract the lowest 32 bits
38100 // and work with those going forward.
38101 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
38102 OnesOrZeroesF);
38103 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
38104 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
38105 Vector32, DAG.getIntPtrConstant(0, DL));
38106 IntVT = MVT::i32;
38107 }
38109 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
38110 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
38111 DAG.getConstant(1, DL, IntVT));
38112 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
38113 ANDed);
38114 return OneBitOfTruth;
38115 }
38116 }
38117 }
38118 }
38119 return SDValue();
38120 }
38122 // Match (xor X, -1) -> X.
38123 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
38124 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
38125 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
38126 V = peekThroughBitcasts(V);
38127 if (V.getOpcode() == ISD::XOR &&
38128 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
38129 return V.getOperand(0);
38130 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38131 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
38132 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
38133 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
38134 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
38135 Not, V.getOperand(1));
38136 }
38137 }
38138 SmallVector<SDValue, 2> CatOps;
38139 if (collectConcatOps(V.getNode(), CatOps)) {
38140 for (SDValue &CatOp : CatOps) {
38141 SDValue NotCat = IsNOT(CatOp, DAG);
38142 if (!NotCat) return SDValue();
38143 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
38145 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
38146 }
38147 return SDValue();
38148 }
38150 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
38151 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
38152 assert(N->getOpcode() == ISD::AND);
38154 MVT VT = N->getSimpleValueType(0);
38155 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
38156 return SDValue();
38158 SDValue X, Y;
38159 SDValue N0 = N->getOperand(0);
38160 SDValue N1 = N->getOperand(1);
38162 if (SDValue Not = IsNOT(N0, DAG)) {
38163 X = Not;
38164 Y = N1;
38165 } else if (SDValue Not = IsNOT(N1, DAG)) {
38166 X = Not;
38167 Y = N0;
38168 } else
38169 return SDValue();
38171 X = DAG.getBitcast(VT, X);
38172 Y = DAG.getBitcast(VT, Y);
38173 return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
38174 }
38176 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
38177 // register. In most cases we actually compare or select YMM-sized registers
38178 // and mixing the two types creates horrible code. This method optimizes
38179 // some of the transition sequences.
38180 // Even with AVX-512 this is still useful for removing casts around logical
38181 // operations on vXi1 mask types.
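// Illustrative example: for
//   %n = and (trunc <8 x i32> %a to <8 x i16>), (trunc <8 x i32> %b to <8 x i16>)
//   %w = sext <8 x i16> %n to <8 x i32>
// the AND is performed directly on %a and %b in the wide type, followed by a
// sign_extend_inreg, keeping the value in a single wide register.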
38182 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
38183 const X86Subtarget &Subtarget) {
38184 EVT VT = N->getValueType(0);
38185 assert(VT.isVector() && "Expected vector type");
38187 assert((N->getOpcode() == ISD::ANY_EXTEND ||
38188 N->getOpcode() == ISD::ZERO_EXTEND ||
38189 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
38191 SDValue Narrow = N->getOperand(0);
38192 EVT NarrowVT = Narrow.getValueType();
38194 if (Narrow->getOpcode() != ISD::XOR &&
38195 Narrow->getOpcode() != ISD::AND &&
38196 Narrow->getOpcode() != ISD::OR)
38197 return SDValue();
38199 SDValue N0 = Narrow->getOperand(0);
38200 SDValue N1 = Narrow->getOperand(1);
38201 SDLoc DL(Narrow);
38203 // The Left side has to be a trunc.
38204 if (N0.getOpcode() != ISD::TRUNCATE)
38205 return SDValue();
38207 // The type of the truncated inputs.
38208 if (N0.getOperand(0).getValueType() != VT)
38209 return SDValue();
38211 // The right side has to be a 'trunc' or a constant vector.
38212 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
38213 N1.getOperand(0).getValueType() == VT;
38214 if (!RHSTrunc &&
38215 !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
38216 return SDValue();
38218 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38220 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
38221 return SDValue();
38223 // Set N0 and N1 to hold the inputs to the new wide operation.
38224 N0 = N0.getOperand(0);
38225 if (RHSTrunc)
38226 N1 = N1.getOperand(0);
38227 else
38228 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
38230 // Generate the wide operation.
38231 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
38232 unsigned Opcode = N->getOpcode();
38233 switch (Opcode) {
38234 default: llvm_unreachable("Unexpected opcode");
38235 case ISD::ANY_EXTEND:
38236 return Op;
38237 case ISD::ZERO_EXTEND:
38238 return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
38239 case ISD::SIGN_EXTEND:
38240 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
38241 Op, DAG.getValueType(NarrowVT));
38245 /// If both input operands of a logic op are being cast from floating point
38246 /// types, try to convert this into a floating point logic node to avoid
38247 /// unnecessary moves from SSE to integer registers.
38248 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
38249 const X86Subtarget &Subtarget) {
38250 EVT VT = N->getValueType(0);
38251 SDValue N0 = N->getOperand(0);
38252 SDValue N1 = N->getOperand(1);
38255 if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
38258 SDValue N00 = N0.getOperand(0);
38259 SDValue N10 = N1.getOperand(0);
38260 EVT N00Type = N00.getValueType();
38261 EVT N10Type = N10.getValueType();
38263 // Ensure that both types are the same and are legal scalar fp types.
38264 if (N00Type != N10Type ||
38265 !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
38266 (Subtarget.hasSSE2() && N00Type == MVT::f64)))
38270 switch (N->getOpcode()) {
38271 default: llvm_unreachable("Unexpected input node for FP logic conversion");
38272 case ISD::AND: FPOpcode = X86ISD::FAND; break;
38273 case ISD::OR: FPOpcode = X86ISD::FOR; break;
38274 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
38277 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
38278 return DAG.getBitcast(VT, FPLogic);
38281 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
38282 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
38283 /// with a shift-right to eliminate loading the vector constant mask value.
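/// For example (illustrative): if every 32-bit element of Op0 is known to be
/// all-ones or all-zeros, then
///   (and Op0, splat(0x0000FFFF))
/// can be rewritten as
///   (X86ISD::VSRLI Op0, 16)
/// avoiding the load of the constant mask from the constant pool.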
38284 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
38285 const X86Subtarget &Subtarget) {
38286 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
38287 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
38288 EVT VT0 = Op0.getValueType();
38289 EVT VT1 = Op1.getValueType();
38291 if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
38295 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
38296 !SplatVal.isMask())
38299 // Don't prevent creation of ANDN.
38300 if (isBitwiseNot(Op0))
38303 if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
38306 unsigned EltBitWidth = VT0.getScalarSizeInBits();
38307 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
38311 unsigned ShiftVal = SplatVal.countTrailingOnes();
38312 SDValue ShAmt = DAG.getConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
38313 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
38314 return DAG.getBitcast(N->getValueType(0), Shift);
38317 // Get the index node from the lowered DAG of a GEP IR instruction with one
38318 // indexing dimension.
38319 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
38320 if (Ld->isIndexed())
38323 SDValue Base = Ld->getBasePtr();
38325 if (Base.getOpcode() != ISD::ADD)
38328 SDValue ShiftedIndex = Base.getOperand(0);
38330 if (ShiftedIndex.getOpcode() != ISD::SHL)
38333 return ShiftedIndex.getOperand(0);
38337 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
38338 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
38339 switch (VT.getSizeInBits()) {
38340 default: return false;
38341 case 64: return Subtarget.is64Bit();
38342 case 32: return true;
38348 // This function recognizes cases where the X86 bzhi instruction can replace an
38349 // 'and-load' sequence.
38350 // When an integer value is loaded from an array of constants defined as
38353 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
38355 // and the result is bitwise-anded with another input, the sequence is
38356 // equivalent to performing bzhi (zero high bits) on that input, using the
38357 // same index as the load.
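// For example (illustrative), with the table above and a 32-bit element type:
//   array[idx] & inp  ==  bzhi(inp, idx)
// because array[idx] == (1 << idx) - 1 keeps exactly the low 'idx' bits of
// 'inp' and clears the rest.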
38358 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
38359 const X86Subtarget &Subtarget) {
38360 MVT VT = Node->getSimpleValueType(0);
38363 // Check if subtarget has BZHI instruction for the node's type
38364 if (!hasBZHI(Subtarget, VT))
38367 // Try matching the pattern for both operands.
38368 for (unsigned i = 0; i < 2; i++) {
38369 SDValue N = Node->getOperand(i);
38370 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
38372 // continue if the operand is not a load instruction
38376 const Value *MemOp = Ld->getMemOperand()->getValue();
38381 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
38382 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
38383 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
38385 Constant *Init = GV->getInitializer();
38386 Type *Ty = Init->getType();
38387 if (!isa<ConstantDataArray>(Init) ||
38388 !Ty->getArrayElementType()->isIntegerTy() ||
38389 Ty->getArrayElementType()->getScalarSizeInBits() !=
38390 VT.getSizeInBits() ||
38391 Ty->getArrayNumElements() >
38392 Ty->getArrayElementType()->getScalarSizeInBits())
38395 // Check if the array's constant elements are suitable to our case.
38396 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
38397 bool ConstantsMatch = true;
38398 for (uint64_t j = 0; j < ArrayElementCount; j++) {
38399 ConstantInt *Elem =
38400 dyn_cast<ConstantInt>(Init->getAggregateElement(j));
38401 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
38402 ConstantsMatch = false;
38406 if (!ConstantsMatch)
38409 // Do the transformation (shown for a 32-bit type): rewrite
38410 //   (and (load arr[idx]), inp)  as
38411 //   (and (srl 0xFFFFFFFF, (sub 32, idx)), inp)
38412 // which is then selected as a single bzhi instruction.
38413 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
38414 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
38416 // Get the Node which indexes into the array.
38417 SDValue Index = getIndexFromUnindexedLoad(Ld);
38420 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
38422 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
38423 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
38425 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
38426 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
38428 return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
38436 // Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
38437 // Turn it into a series of XORs and a setnp.
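// For example (illustrative), for a 32-bit X the resulting sequence is:
//   t = X ^ (X >> 16)                          // fold the high half into the low half
//   flags = xor8((t & 0xFF), ((t >> 8) & 0xFF)) // 8-bit flag-setting xor
//   parity = setnp(flags)                       // PF clear <=> odd number of set bits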
38438 static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
38439 const X86Subtarget &Subtarget) {
38440 EVT VT = N->getValueType(0);
38442 // We only support 64-bit and 32-bit. 64-bit requires special handling
38443 // unless the 64-bit popcnt instruction is legal.
38444 if (VT != MVT::i32 && VT != MVT::i64)
38447 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38448 if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
38451 SDValue N0 = N->getOperand(0);
38452 SDValue N1 = N->getOperand(1);
38454 // LHS needs to be a single use CTPOP.
38455 if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
38458 // RHS needs to be 1.
38459 if (!isOneConstant(N1))
38463 SDValue X = N0.getOperand(0);
38465 // If this is 64-bit, it's always best to xor the two 32-bit pieces together
38466 // even if we have popcnt.
38467 if (VT == MVT::i64) {
38468 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
38469 DAG.getNode(ISD::SRL, DL, VT, X,
38470 DAG.getConstant(32, DL, MVT::i8)));
38471 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
38472 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
38473 // Generate a 32-bit parity idiom. This will bring us back here if we need
38474 // to expand it too.
38475 SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
38476 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
38477 DAG.getConstant(1, DL, MVT::i32));
38478 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
38480 assert(VT == MVT::i32 && "Unexpected VT!");
38482 // Xor the high and low 16-bits together using a 32-bit operation.
38483 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
38484 DAG.getConstant(16, DL, MVT::i8));
38485 X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
38487 // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
38488 // This should allow an h-reg to be used to save a shift.
38489 // FIXME: We only get an h-reg in 32-bit mode.
38490 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
38491 DAG.getNode(ISD::SRL, DL, VT, X,
38492 DAG.getConstant(8, DL, MVT::i8)));
38493 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
38494 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
38495 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
38497 // Copy the inverse of the parity flag into a register with setcc.
38498 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
38499 // Zero extend to original type.
38500 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
38503 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
38504 TargetLowering::DAGCombinerInfo &DCI,
38505 const X86Subtarget &Subtarget) {
38506 EVT VT = N->getValueType(0);
38508 // If this is SSE1 only convert to FAND to avoid scalarization.
38509 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
38510 return DAG.getBitcast(
38511 MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
38512 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
38513 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
38516 // Use a 32-bit and+zext if the upper bits are known to be zero.
38517 if (VT == MVT::i64 && Subtarget.is64Bit() &&
38518 !isa<ConstantSDNode>(N->getOperand(1))) {
38519 APInt HiMask = APInt::getHighBitsSet(64, 32);
38520 if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
38521 DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
38523 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
38524 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
38525 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
38526 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
38530 // This must be done before legalization has expanded the ctpop.
38531 if (SDValue V = combineParity(N, DAG, Subtarget))
38534 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
38535 // TODO: Support multiple SrcOps.
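// For example (illustrative): and-reducing the i1 elements of a v4i32 compare
// result can be lowered as a MOVMSK-style bitcast of the mask followed by a
// compare of the i4 value against 0b1111.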
38536 if (VT == MVT::i1) {
38537 SmallVector<SDValue, 2> SrcOps;
38538 if (matchBitOpReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
38539 SrcOps.size() == 1) {
38541 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
38542 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
38543 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
38545 APInt AllBits = APInt::getAllOnesValue(NumElts);
38546 return DAG.getSetCC(dl, MVT::i1, Mask,
38547 DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
38552 if (DCI.isBeforeLegalizeOps())
38555 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
38558 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
38561 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
38564 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
38567 if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
38570 // Attempt to recursively combine a bitmask AND with shuffles.
38571 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
38573 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38577 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
38578 if ((VT.getScalarSizeInBits() % 8) == 0 &&
38579 N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
38580 isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
38581 SDValue BitMask = N->getOperand(1);
38582 SDValue SrcVec = N->getOperand(0).getOperand(0);
38583 EVT SrcVecVT = SrcVec.getValueType();
38585 // Check that the constant bitmask masks whole bytes.
38587 SmallVector<APInt, 64> EltBits;
38588 if (VT == SrcVecVT.getScalarType() &&
38589 N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
38590 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
38591 llvm::all_of(EltBits, [](APInt M) {
38592 return M.isNullValue() || M.isAllOnesValue();
38594 unsigned NumElts = SrcVecVT.getVectorNumElements();
38595 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
38596 unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
38598 // Create a root shuffle mask from the byte mask and the extracted index.
38599 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
38600 for (unsigned i = 0; i != Scale; ++i) {
38603 int VecIdx = Scale * Idx + i;
38604 ShuffleMask[VecIdx] =
38605 EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
38608 if (SDValue Shuffle = combineX86ShufflesRecursively(
38609 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 2,
38610 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
38611 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
38612 N->getOperand(0).getOperand(1));
38619 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
38620 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
38621 const X86Subtarget &Subtarget) {
38622 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
38624 EVT VT = N->getValueType(0);
38625 if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
38628 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
38629 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
38630 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
38633 // On XOP we'll lower to PCMOV, so accept one use. Otherwise, only
38634 // do this if either mask has multiple uses already.
38635 if (!(Subtarget.hasXOP() || !N0.getOperand(1).hasOneUse() ||
38636 !N1.getOperand(1).hasOneUse()))
38639 // Attempt to extract constant byte masks.
38640 APInt UndefElts0, UndefElts1;
38641 SmallVector<APInt, 32> EltBits0, EltBits1;
38642 if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
38645 if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
38649 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
38650 // TODO - add UNDEF elts support.
38651 if (UndefElts0[i] || UndefElts1[i])
38653 if (EltBits0[i] != ~EltBits1[i])
38658 SDValue X = N->getOperand(0);
38660 DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
38661 DAG.getBitcast(VT, N1.getOperand(0)));
38662 return DAG.getNode(ISD::OR, DL, VT, X, Y);
38665 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
38666 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
38667 if (N->getOpcode() != ISD::OR)
38670 SDValue N0 = N->getOperand(0);
38671 SDValue N1 = N->getOperand(1);
38673 // Canonicalize AND to LHS.
38674 if (N1.getOpcode() == ISD::AND)
38677 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
38678 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
38681 Mask = N1.getOperand(0);
38682 X = N1.getOperand(1);
38684 // Check to see if the mask appeared in both the AND and ANDNP.
38685 if (N0.getOperand(0) == Mask)
38686 Y = N0.getOperand(1);
38687 else if (N0.getOperand(1) == Mask)
38688 Y = N0.getOperand(0);
38692 // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
38693 // ANDNP combine first allows other combines to happen that prevent matching.
38698 // Try to fold (or (and (M, (sub 0, X)), (pandn M, X))),
38699 // which is a special case of vselect:
38700 // (vselect M, (sub 0, X), X)
38702 // Per http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
38703 // We know that, if fNegate is 0 or 1:
38704 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
38706 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
38707 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
38708 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
38709 // This lets us transform our vselect to:
38710 // (add (xor X, M), (and M, 1))
38712 // (sub (xor X, M), M)
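// Numeric check (illustrative), with 8-bit lanes:
//   M = 0xFF, X = 5: (X ^ M) - M = 0xFA - 0xFF = 0xFB = -5   (negated)
//   M = 0x00, X = 5: (X ^ M) - M = 0x05 - 0x00 = 0x05 =  5   (unchanged)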
38713 static SDValue combineLogicBlendIntoConditionalNegate(
38714 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
38715 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
38716 EVT MaskVT = Mask.getValueType();
38717 assert(MaskVT.isInteger() &&
38718 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
38719 "Mask must be zero/all-bits");
38721 if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
38723 if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
38726 auto IsNegV = [](SDNode *N, SDValue V) {
38727 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
38728 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
38731 SDValue V;
38732 if (IsNegV(Y.getNode(), X))
38733 V = X;
38734 else if (IsNegV(X.getNode(), Y))
38735 V = Y;
38736 else
38737 return SDValue();
38739 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
38740 SDValue SubOp2 = Mask;
38742 // If the negate was on the false side of the select, then
38743 // the operands of the SUB need to be swapped. PR 27251.
38744 // This is because the pattern being matched above is
38745 // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
38746 // but if the pattern matched was
38747 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
38748 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
38749 // pattern also needs to be a negation of the replacement pattern above.
38750 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
38751 // sub accomplishes the negation of the replacement pattern.
38753 std::swap(SubOp1, SubOp2);
38755 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
38756 return DAG.getBitcast(VT, Res);
38760 // Try to fold (or (and (m, y), (pandn m, x))) into
38762 // (vselect m, x, y)
38763 // As a special case, try to fold:
38764 // (or (and (m, (sub 0, x)), (pandn m, x)))
38766 // into (sub (xor X, M), M).
38767 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
38768 const X86Subtarget &Subtarget) {
38769 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
38771 EVT VT = N->getValueType(0);
38772 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
38773 (VT.is256BitVector() && Subtarget.hasInt256())))
38776 SDValue X, Y, Mask;
38777 if (!matchLogicBlend(N, X, Y, Mask))
38780 // Validate that X, Y, and Mask are bitcasts, and see through them.
38781 Mask = peekThroughBitcasts(Mask);
38782 X = peekThroughBitcasts(X);
38783 Y = peekThroughBitcasts(Y);
38785 EVT MaskVT = Mask.getValueType();
38786 unsigned EltBits = MaskVT.getScalarSizeInBits();
38788 // TODO: Attempt to handle floating point cases as well?
38789 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
38794 // Attempt to combine to conditional negate: (sub (xor X, M), M)
38795 if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
38799 // PBLENDVB is only available on SSE 4.1.
38800 if (!Subtarget.hasSSE41())
38803 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
38805 X = DAG.getBitcast(BlendVT, X);
38806 Y = DAG.getBitcast(BlendVT, Y);
38807 Mask = DAG.getBitcast(BlendVT, Mask);
38808 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
38809 return DAG.getBitcast(VT, Mask);
38812 // Helper function for combineOrCmpEqZeroToCtlzSrl
38816 // Transforms seteq(cmp x, 0) into srl(ctlz x), log2(bitsize(x)).
38817 // Input pattern is checked by caller.
38818 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
38819 SelectionDAG &DAG) {
38820 SDValue Cmp = Op.getOperand(1);
38821 EVT VT = Cmp.getOperand(0).getValueType();
38822 unsigned Log2b = Log2_32(VT.getSizeInBits());
38824 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
38825 // The result of the shift is true or false, and on X86, the 32-bit
38826 // encoding of shr and lzcnt is more desirable.
38827 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
38828 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
38829 DAG.getConstant(Log2b, dl, MVT::i8));
38830 return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
38833 // Try to transform:
38834 // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
38836 // into srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
38837 // Will also attempt to match more generic cases, eg:
38838 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
38839 // Only applies if the target supports the FastLZCNT feature.
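// For example (illustrative), for i32 x and y on a FastLZCNT target:
//   zext(or(x == 0, y == 0))  -->  (or(ctlz(x), ctlz(y))) >> 5
// because ctlz of an i32 is 32 (bit 5 set) exactly when the input is zero.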
38840 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
38841 TargetLowering::DAGCombinerInfo &DCI,
38842 const X86Subtarget &Subtarget) {
38843 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
38846 auto isORCandidate = [](SDValue N) {
38847 return (N->getOpcode() == ISD::OR && N->hasOneUse());
38850 // Check that the zero extend is extending to 32 bits or more. The code generated by
38851 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
38852 // instructions to clear the upper bits.
38853 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
38854 !isORCandidate(N->getOperand(0)))
38857 // Check the node matches: setcc(eq, cmp 0)
38858 auto isSetCCCandidate = [](SDValue N) {
38859 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
38860 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
38861 N->getOperand(1).getOpcode() == X86ISD::CMP &&
38862 isNullConstant(N->getOperand(1).getOperand(1)) &&
38863 N->getOperand(1).getValueType().bitsGE(MVT::i32);
38866 SDNode *OR = N->getOperand(0).getNode();
38867 SDValue LHS = OR->getOperand(0);
38868 SDValue RHS = OR->getOperand(1);
38870 // Save nodes matching or(or, setcc(eq, cmp 0)).
38871 SmallVector<SDNode *, 2> ORNodes;
38872 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
38873 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
38874 ORNodes.push_back(OR);
38875 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
38876 LHS = OR->getOperand(0);
38877 RHS = OR->getOperand(1);
38880 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
38881 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
38882 !isORCandidate(SDValue(OR, 0)))
38885 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
38887 // into or(srl(ctlz),srl(ctlz)).
38888 // The dag combiner can then fold it into:
38889 // srl(or(ctlz, ctlz)).
38890 EVT VT = OR->getValueType(0);
38891 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
38892 SDValue Ret, NewRHS;
38893 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
38894 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
38899 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
38900 while (ORNodes.size() > 0) {
38901 OR = ORNodes.pop_back_val();
38902 LHS = OR->getOperand(0);
38903 RHS = OR->getOperand(1);
38904 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
38905 if (RHS->getOpcode() == ISD::OR)
38906 std::swap(LHS, RHS);
38907 NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
38910 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
38914 Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
38919 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
38920 TargetLowering::DAGCombinerInfo &DCI,
38921 const X86Subtarget &Subtarget) {
38922 SDValue N0 = N->getOperand(0);
38923 SDValue N1 = N->getOperand(1);
38924 EVT VT = N->getValueType(0);
38926 // If this is SSE1 only convert to FOR to avoid scalarization.
38927 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
38928 return DAG.getBitcast(MVT::v4i32,
38929 DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
38930 DAG.getBitcast(MVT::v4f32, N0),
38931 DAG.getBitcast(MVT::v4f32, N1)));
38934 if (DCI.isBeforeLegalizeOps())
38937 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
38940 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
38943 if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
38946 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
38949 // Attempt to recursively combine an OR of shuffles.
38950 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
38952 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38956 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
38959 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
38960 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
38961 unsigned Bits = VT.getScalarSizeInBits();
38963 // SHLD/SHRD instructions have lower register pressure, but on some
38964 // platforms they have higher latency than the equivalent
38965 // series of shifts/or that would otherwise be generated.
38966 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
38967 // have higher latencies and we are not optimizing for size.
38968 if (!OptForSize && Subtarget.isSHLDSlow())
38971 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
38973 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
38975 if (!N0.hasOneUse() || !N1.hasOneUse())
38978 SDValue ShAmt0 = N0.getOperand(1);
38979 if (ShAmt0.getValueType() != MVT::i8)
38981 SDValue ShAmt1 = N1.getOperand(1);
38982 if (ShAmt1.getValueType() != MVT::i8)
38985 // Peek through any modulo shift masks.
38987 if (ShAmt0.getOpcode() == ISD::AND &&
38988 isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
38989 ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
38991 ShAmt0 = ShAmt0.getOperand(0);
38994 if (ShAmt1.getOpcode() == ISD::AND &&
38995 isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
38996 ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
38998 ShAmt1 = ShAmt1.getOperand(0);
39001 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
39002 ShAmt0 = ShAmt0.getOperand(0);
39003 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
39004 ShAmt1 = ShAmt1.getOperand(0);
39007 unsigned Opc = ISD::FSHL;
39008 SDValue Op0 = N0.getOperand(0);
39009 SDValue Op1 = N1.getOperand(0);
39010 if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
39012 std::swap(Op0, Op1);
39013 std::swap(ShAmt0, ShAmt1);
39014 std::swap(ShMsk0, ShMsk1);
39017 auto GetFunnelShift = [&DAG, &DL, VT, Opc](SDValue Op0, SDValue Op1,
39019 if (Opc == ISD::FSHR)
39020 std::swap(Op0, Op1);
39021 return DAG.getNode(Opc, DL, VT, Op0, Op1,
39022 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Amt));
39025 // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
39026 // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
39027 // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
39028 // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
39029 // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
39030 // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
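// For example (illustrative), with i32 operands:
//   (or (shl X, 7), (srl Y, 25))  -->  (fshl X, Y, 7)
// which can then be selected as SHLD when profitable.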
39031 if (ShAmt1.getOpcode() == ISD::SUB) {
39032 SDValue Sum = ShAmt1.getOperand(0);
39033 if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
39034 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
39035 if (ShAmt1Op1.getOpcode() == ISD::AND &&
39036 isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
39037 ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
39038 ShMsk1 = ShAmt1Op1;
39039 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
39041 if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
39042 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
39043 if ((SumC->getAPIntValue() == Bits ||
39044 (SumC->getAPIntValue() == 0 && ShMsk1)) &&
39045 ShAmt1Op1 == ShAmt0)
39046 return GetFunnelShift(Op0, Op1, ShAmt0);
39048 } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
39049 auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
39050 if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
39051 return GetFunnelShift(Op0, Op1, ShAmt0);
39052 } else if (ShAmt1.getOpcode() == ISD::XOR) {
39053 SDValue Mask = ShAmt1.getOperand(1);
39054 if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
39055 unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
39056 SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
39057 if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
39058 ShAmt1Op0 = ShAmt1Op0.getOperand(0);
39059 if (MaskC->getSExtValue() == (Bits - 1) &&
39060 (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
39061 if (Op1.getOpcode() == InnerShift &&
39062 isa<ConstantSDNode>(Op1.getOperand(1)) &&
39063 Op1.getConstantOperandAPInt(1) == 1) {
39064 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
39066 // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
39067 if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
39068 Op1.getOperand(0) == Op1.getOperand(1)) {
39069 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
39078 /// Try to turn tests against the signbit in the form of:
39079 /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1) into SETGT(X, -1).
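/// For example (illustrative), for i32 X:
///   xor (trunc (srl X, 31)), 1  -->  setgt X, -1
/// both yield 1 exactly when X is non-negative.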
39082 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
39083 // This is only worth doing if the output type is i8 or i1.
39084 EVT ResultType = N->getValueType(0);
39085 if (ResultType != MVT::i8 && ResultType != MVT::i1)
39088 SDValue N0 = N->getOperand(0);
39089 SDValue N1 = N->getOperand(1);
39091 // We should be performing an xor against a truncated shift.
39092 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
39095 // Make sure we are performing an xor against one.
39096 if (!isOneConstant(N1))
39099 // SetCC on x86 zero extends so only act on this if it's a logical shift.
39100 SDValue Shift = N0.getOperand(0);
39101 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
39104 // Make sure we are truncating from one of i16, i32 or i64.
39105 EVT ShiftTy = Shift.getValueType();
39106 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
39109 // Make sure the shift amount extracts the sign bit.
39110 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
39111 Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
39114 // Create a greater-than comparison against -1.
39115 // N.B. Using SETGE against 0 works but we want a canonical-looking
39116 // comparison; using SETGT matches up with what TranslateX86CC does.
39118 SDValue ShiftOp = Shift.getOperand(0);
39119 EVT ShiftOpTy = ShiftOp.getValueType();
39120 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39121 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
39122 *DAG.getContext(), ResultType);
39123 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
39124 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
39125 if (SetCCResultType != ResultType)
39126 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
39130 /// Turn vector tests of the signbit in the form of:
39131 /// xor (sra X, elt_size(X)-1), -1 into pcmpgt X, -1.
39135 /// This should be called before type legalization because the pattern may not
39136 /// persist after that.
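/// For example (illustrative), for v4i32 X:
///   xor (sra X, 31), -1  -->  pcmpgt X, -1
/// both produce an all-ones lane exactly where the corresponding lane of X is
/// non-negative.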
39137 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
39138 const X86Subtarget &Subtarget) {
39139 EVT VT = N->getValueType(0);
39140 if (!VT.isSimple())
39143 switch (VT.getSimpleVT().SimpleTy) {
39144 default: return SDValue();
39147 case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
39148 case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
39152 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
39155 // There must be a shift right algebraic before the xor, and the xor must be a
39156 // 'not' operation.
39157 SDValue Shift = N->getOperand(0);
39158 SDValue Ones = N->getOperand(1);
39159 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
39160 !ISD::isBuildVectorAllOnes(Ones.getNode()))
39163 // The shift should be smearing the sign bit across each vector element.
39164 ConstantSDNode *ShiftAmt =
39165 isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
39166 if (!ShiftAmt ||
39167 ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
39168 return SDValue();
39170 // Create a greater-than comparison against -1. We don't use the more obvious
39171 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
39172 return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
39175 /// Check if truncation with saturation from type \p SrcVT to \p DstVT
39176 /// is valid for the given \p Subtarget.
39177 static bool isSATValidOnAVX512Subtarget(EVT SrcVT, EVT DstVT,
39178 const X86Subtarget &Subtarget) {
39179 if (!Subtarget.hasAVX512())
39182 // FIXME: Scalar type may be supported if we move it to vector register.
39183 if (!SrcVT.isVector())
39186 EVT SrcElVT = SrcVT.getScalarType();
39187 EVT DstElVT = DstVT.getScalarType();
39188 if (DstElVT != MVT::i8 && DstElVT != MVT::i16 && DstElVT != MVT::i32)
39190 if (SrcVT.is512BitVector() || Subtarget.hasVLX())
39191 return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI();
39195 /// Detect patterns of truncation with unsigned saturation:
39197 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
39198 /// Return the source value x to be truncated or SDValue() if the pattern was
39199 /// not matched.
39201 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
39202 /// where C1 >= 0 and C2 is unsigned max of destination type.
39204 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
39205 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
39207 /// These two patterns are equivalent to:
39208 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
39209 /// So return the smax(x, C1) value to be truncated or SDValue() if the
39210 /// pattern was not matched.
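/// For example (illustrative), truncating i32 elements to i8:
///   (trunc (umin X, 255))            returns X
///   (trunc (smin (smax X, 0), 255))  returns (smax X, 0)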
39211 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39213 EVT InVT = In.getValueType();
39215 // Saturation with truncation. We truncate from InVT to VT.
39216 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
39217 "Unexpected types for truncate operation");
39219 // Match min/max and return limit value as a parameter.
39220 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
39221 if (V.getOpcode() == Opcode &&
39222 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
39223 return V.getOperand(0);
39228 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
39229 // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
39230 // the element size of the destination type.
39231 if (C2.isMask(VT.getScalarSizeInBits()))
39234 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
39235 if (MatchMinMax(SMin, ISD::SMAX, C1))
39236 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
39239 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
39240 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
39241 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
39243 return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
39249 /// Detect patterns of truncation with signed saturation:
39250 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
39251 /// signed_max_of_dest_type)) to dest_type)
39253 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
39254 /// signed_min_of_dest_type)) to dest_type).
39255 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
39256 /// Return the source value to be truncated or SDValue() if the pattern was not
39257 /// matched.
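/// For example (illustrative), truncating i16 elements to i8:
///   (trunc (smin (smax X, -128), 127))  returns X
/// With MatchPackUS the accepted clamp range is [0, 255] instead.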
39258 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
39259 unsigned NumDstBits = VT.getScalarSizeInBits();
39260 unsigned NumSrcBits = In.getScalarValueSizeInBits();
39261 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
39263 auto MatchMinMax = [](SDValue V, unsigned Opcode,
39264 const APInt &Limit) -> SDValue {
39266 if (V.getOpcode() == Opcode &&
39267 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
39268 return V.getOperand(0);
39272 APInt SignedMax, SignedMin;
39273 if (MatchPackUS) {
39274 SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
39275 SignedMin = APInt(NumSrcBits, 0);
39276 } else {
39277 SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
39278 SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
39279 }
39281 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
39282 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
39285 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
39286 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
39292 /// Detect a pattern of truncation with signed saturation.
39293 /// The types should allow use of the VPMOVSS* instructions on AVX512.
39294 /// Return the source value to be truncated or SDValue() if the pattern was not
39295 /// matched.
39296 static SDValue detectAVX512SSatPattern(SDValue In, EVT VT,
39297 const X86Subtarget &Subtarget,
39298 const TargetLowering &TLI) {
39299 if (!TLI.isTypeLegal(In.getValueType()))
39301 if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
39303 return detectSSatPattern(In, VT);
39306 /// Detect a pattern of truncation with saturation:
39307 /// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
39308 /// The types should allow use of the VPMOVUS* instructions on AVX512.
39309 /// Return the source value to be truncated or SDValue() if the pattern was not
39310 /// matched.
39311 static SDValue detectAVX512USatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39313 const X86Subtarget &Subtarget,
39314 const TargetLowering &TLI) {
39315 if (!TLI.isTypeLegal(In.getValueType()))
39317 if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
39319 return detectUSatPattern(In, VT, DAG, DL);
39322 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
39324 const X86Subtarget &Subtarget) {
39325 EVT SVT = VT.getScalarType();
39326 EVT InVT = In.getValueType();
39327 EVT InSVT = InVT.getScalarType();
39328 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39329 if (TLI.isTypeLegal(InVT) && TLI.isTypeLegal(VT) &&
39330 isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
39331 if (auto SSatVal = detectSSatPattern(In, VT))
39332 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
39333 if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
39334 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
39336 if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
39337 !Subtarget.hasAVX512() &&
39338 (SVT == MVT::i8 || SVT == MVT::i16) &&
39339 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
39340 if (auto USatVal = detectSSatPattern(In, VT, true)) {
39341 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
39342 if (SVT == MVT::i8 && InSVT == MVT::i32) {
39343 EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
39344 VT.getVectorNumElements());
39345 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
39348 return truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
39350 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
39351 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
39354 if (auto SSatVal = detectSSatPattern(In, VT))
39355 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
39361 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
39362 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
39363 /// X86ISD::AVG instruction.
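/// For example (illustrative), for <16 x i8> inputs a and b:
///   trunc(((zext a) + (zext b) + 1) >> 1)  -->  X86ISD::AVG(a, b)
/// which selects to PAVGB.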
39364 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39365 const X86Subtarget &Subtarget,
39367 if (!VT.isVector())
39369 EVT InVT = In.getValueType();
39370 unsigned NumElems = VT.getVectorNumElements();
39372 EVT ScalarVT = VT.getVectorElementType();
39373 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
39374 NumElems >= 2 && isPowerOf2_32(NumElems)))
39377 // InScalarVT is the intermediate type in the AVG pattern and it should be
39378 // wider than the original input type (i8/i16).
39379 EVT InScalarVT = InVT.getVectorElementType();
39380 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
39383 if (!Subtarget.hasSSE2())
39386 // Detect the following pattern:
39388 // %1 = zext <N x i8> %a to <N x i32>
39389 // %2 = zext <N x i8> %b to <N x i32>
39390 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
39391 // %4 = add nuw nsw <N x i32> %3, %2
39392 // %5 = lshr <N x i32> %4, <i32 1 x N>
39393 // %6 = trunc <N x i32> %5 to <N x i8>
39395 // In AVX512, the last instruction can also be a trunc store.
39396 if (In.getOpcode() != ISD::SRL)
39399 // A lambda checking whether the given SDValue is a constant vector and each
39400 // element is in the range [Min, Max].
39401 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
39402 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
39403 if (!BV || !BV->isConstant())
39405 for (SDValue Op : V->ops()) {
39406 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
39409 const APInt &Val = C->getAPIntValue();
39410 if (Val.ult(Min) || Val.ugt(Max))
39416 // Check if each element of the vector is right-shifted by one.
39417 auto LHS = In.getOperand(0);
39418 auto RHS = In.getOperand(1);
39419 if (!IsConstVectorInRange(RHS, 1, 1))
39421 if (LHS.getOpcode() != ISD::ADD)
39424 // Detect a pattern of a + b + 1 where the order doesn't matter.
39425 SDValue Operands[3];
39426 Operands[0] = LHS.getOperand(0);
39427 Operands[1] = LHS.getOperand(1);
39429 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39430 ArrayRef<SDValue> Ops) {
39431 return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
39434 // Take care of the case when one of the operands is a constant vector whose
39435 // element is in the range [1, 256].
39436 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
39437 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
39438 Operands[0].getOperand(0).getValueType() == VT) {
39439 // The pattern is detected. Subtract one from the constant vector, then
39440 // demote it and emit X86ISD::AVG instruction.
39441 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
39442 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
39443 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
39444 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
39445 { Operands[0].getOperand(0), Operands[1] },
39449 // Matches 'add like' patterns: add(Op0,Op1) and zext(or(Op0,Op1)).
39450 // Match the or case only if it's 'add-like', i.e. it can be replaced by an add.
39451 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
39452 if (ISD::ADD == V.getOpcode()) {
39453 Op0 = V.getOperand(0);
39454 Op1 = V.getOperand(1);
39457 if (ISD::ZERO_EXTEND != V.getOpcode())
39459 V = V.getOperand(0);
39460 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
39461 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
39463 Op0 = V.getOperand(0);
39464 Op1 = V.getOperand(1);
39469 if (FindAddLike(Operands[0], Op0, Op1))
39470 std::swap(Operands[0], Operands[1]);
39471 else if (!FindAddLike(Operands[1], Op0, Op1))
39476 // Now we have three operands of two additions. Check that one of them is a
39477 // constant vector with ones, and the other two can be promoted from i8/i16.
39478 for (int i = 0; i < 3; ++i) {
39479 if (!IsConstVectorInRange(Operands[i], 1, 1))
39481 std::swap(Operands[i], Operands[2]);
39483 // Check if Operands[0] and Operands[1] are results of type promotion.
39484 for (int j = 0; j < 2; ++j)
39485 if (Operands[j].getValueType() != VT) {
39486 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
39487 Operands[j].getOperand(0).getValueType() != VT)
39489 Operands[j] = Operands[j].getOperand(0);
39492 // The pattern is detected, emit X86ISD::AVG instruction(s).
39493 return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
39500 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
39501 TargetLowering::DAGCombinerInfo &DCI,
39502 const X86Subtarget &Subtarget) {
39503 LoadSDNode *Ld = cast<LoadSDNode>(N);
39504 EVT RegVT = Ld->getValueType(0);
39505 EVT MemVT = Ld->getMemoryVT();
39507 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39509 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
39510 // into two 16-byte operations. Also split non-temporal aligned loads on
39511 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
39512 ISD::LoadExtType Ext = Ld->getExtensionType();
39514 unsigned Alignment = Ld->getAlignment();
39515 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
39516 Ext == ISD::NON_EXTLOAD &&
39517 ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
39518 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
39519 *Ld->getMemOperand(), &Fast) &&
39521 unsigned NumElems = RegVT.getVectorNumElements();
39525 unsigned HalfAlign = 16;
39526 SDValue Ptr1 = Ld->getBasePtr();
39527 SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
39528 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
39531 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
39532 Alignment, Ld->getMemOperand()->getFlags());
39533 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
39534 Ld->getPointerInfo().getWithOffset(HalfAlign),
39535 MinAlign(Alignment, HalfAlign),
39536 Ld->getMemOperand()->getFlags());
39537 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
39538 Load1.getValue(1), Load2.getValue(1));
39540 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
39541 return DCI.CombineTo(N, NewVec, TF, true);
39544 // Bool vector load - attempt to cast to an integer, as we have good
39545 // (vXiY *ext(vXi1 bitcast(iX))) handling.
39546 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
39547 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
39548 unsigned NumElts = RegVT.getVectorNumElements();
39549 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
39550 if (TLI.isTypeLegal(IntVT)) {
39551 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
39552 Ld->getPointerInfo(), Alignment,
39553 Ld->getMemOperand()->getFlags());
39554 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
39555 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
39562 /// If V is a build vector of boolean constants and exactly one of those
39563 /// constants is true, return the operand index of that true element.
39564 /// Otherwise, return -1.
39565 static int getOneTrueElt(SDValue V) {
39566 // This needs to be a build vector of booleans.
39567 // TODO: Checking for the i1 type matches the IR definition for the mask,
39568 // but the mask check could be loosened to i8 or other types. That might
39569 // also require checking more than 'allOnesValue'; eg, the x86 HW
39570 // instructions only require that the MSB is set for each mask element.
39571 // The ISD::MSTORE comments/definition do not specify how the mask operand
39572 // is formatted.
39573 auto *BV = dyn_cast<BuildVectorSDNode>(V);
39574 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
39577 int TrueIndex = -1;
39578 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
39579 for (unsigned i = 0; i < NumElts; ++i) {
39580 const SDValue &Op = BV->getOperand(i);
39583 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
39586 if (ConstNode->getAPIntValue().isAllOnesValue()) {
39587 // If we already found a one, this is too many.
39588 if (TrueIndex >= 0)
39596 /// Given a masked memory load/store operation, return true if it has one mask
39597 /// bit set. If it has one mask bit set, then also return the memory address of
39598 /// the scalar element to load/store, the vector index to insert/extract that
39599 /// scalar element, and the alignment for the scalar memory access.
39600 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
39601 SelectionDAG &DAG, SDValue &Addr,
39602 SDValue &Index, unsigned &Alignment) {
39603 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
39604 if (TrueMaskElt < 0)
39607 // Get the address of the one scalar element that is specified by the mask
39608 // using the appropriate offset from the base pointer.
39609 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
39610 Addr = MaskedOp->getBasePtr();
39611 if (TrueMaskElt != 0) {
39612 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
39613 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
39616 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
39617 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
39621 /// If exactly one element of the mask is set for a non-extending masked load,
39622 /// it is a scalar load and vector insert.
39623 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
39624 /// mask have already been optimized in IR, so we don't bother with those here.
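/// For example (illustrative): a masked load of <4 x float> with mask
/// <0,0,1,0> becomes a scalar load of element 2 followed by an
/// INSERT_VECTOR_ELT into the pass-through vector.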
39626 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
39627 TargetLowering::DAGCombinerInfo &DCI) {
39628 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
39629 // However, some target hooks may need to be added to know when the transform
39630 // is profitable. Endianness would also have to be considered.
39632 SDValue Addr, VecIndex;
39633 unsigned Alignment;
39634 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
39637 // Load the one scalar element that is specified by the mask using the
39638 // appropriate offset from the base pointer.
39640 EVT VT = ML->getValueType(0);
39641 EVT EltVT = VT.getVectorElementType();
39643 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
39644 Alignment, ML->getMemOperand()->getFlags());
39646 // Insert the loaded element into the appropriate place in the vector.
39647 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
39648 ML->getPassThru(), Load, VecIndex);
39649 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
39653 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
39654 TargetLowering::DAGCombinerInfo &DCI) {
39655 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
39659 EVT VT = ML->getValueType(0);
39661 // If we are loading the first and last elements of a vector, it is safe and
39662 // always faster to load the whole vector. Replace the masked load with a
39663 // vector load and select.
39664 unsigned NumElts = VT.getVectorNumElements();
39665 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
39666 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
39667 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
39668 if (LoadFirstElt && LoadLastElt) {
39669 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
39670 ML->getMemOperand());
39671 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
39672 ML->getPassThru());
39673 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
39676 // Convert a masked load with a constant mask into a masked load and a select.
39677 // This allows the select operation to use a faster kind of select instruction
39678 // (for example, vblendvps -> vblendps).
39680 // Don't try this if the pass-through operand is already undefined. That would
39681 // cause an infinite loop because that's what we're about to create.
39682 if (ML->getPassThru().isUndef())
39685 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
39688 // The new masked load has an undef pass-through operand. The select uses the
39689 // original pass-through operand.
39690 SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
39691 ML->getMask(), DAG.getUNDEF(VT),
39692 ML->getMemoryVT(), ML->getMemOperand(),
39693 ML->getExtensionType());
39694 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
39695 ML->getPassThru());
39697 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
39700 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
39701 TargetLowering::DAGCombinerInfo &DCI,
39702 const X86Subtarget &Subtarget) {
39703 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
39705 // TODO: Expanding load with constant mask may be optimized as well.
39706 if (Mld->isExpandingLoad())
39709 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
39710 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
39712 // TODO: Do some AVX512 subsets benefit from this transform?
39713 if (!Subtarget.hasAVX512())
39714 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
39718 if (Mld->getExtensionType() != ISD::EXTLOAD)
39721 // Resolve extending loads.
39722 EVT VT = Mld->getValueType(0);
39723 unsigned NumElems = VT.getVectorNumElements();
39724 EVT LdVT = Mld->getMemoryVT();
39727 assert(LdVT != VT && "Cannot extend to the same type");
39728 unsigned ToSz = VT.getScalarSizeInBits();
39729 unsigned FromSz = LdVT.getScalarSizeInBits();
39730 // From/To sizes and ElemCount must be pow of two.
39731 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
39732 "Unexpected size for extending masked load");
39734 unsigned SizeRatio = ToSz / FromSz;
39735 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
39737 // Create a type on which we perform the shuffle.
39738 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
39739 LdVT.getScalarType(), NumElems*SizeRatio);
39740 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
39742 // Convert PassThru value.
39743 SDValue WidePassThru = DAG.getBitcast(WideVecVT, Mld->getPassThru());
39744 if (!Mld->getPassThru().isUndef()) {
39745 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39746 for (unsigned i = 0; i != NumElems; ++i)
39747 ShuffleVec[i] = i * SizeRatio;
39749 // Can't shuffle using an illegal type.
39750 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
39751 "WideVecVT should be legal");
39752 WidePassThru = DAG.getVectorShuffle(WideVecVT, dl, WidePassThru,
39753 DAG.getUNDEF(WideVecVT), ShuffleVec);
39756 // Prepare the new mask.
39758 SDValue Mask = Mld->getMask();
39759 if (Mask.getValueType() == VT) {
39760 // Mask and original value have the same type.
39761 NewMask = DAG.getBitcast(WideVecVT, Mask);
39762 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39763 for (unsigned i = 0; i != NumElems; ++i)
39764 ShuffleVec[i] = i * SizeRatio;
39765 for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
39766 ShuffleVec[i] = NumElems * SizeRatio;
39767 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
39768 DAG.getConstant(0, dl, WideVecVT),
39771 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
39772 unsigned WidenNumElts = NumElems*SizeRatio;
39773 unsigned MaskNumElts = VT.getVectorNumElements();
39774 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
39777 unsigned NumConcat = WidenNumElts / MaskNumElts;
39778 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
39779 SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
39781 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
39784 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
39785 Mld->getBasePtr(), NewMask, WidePassThru,
39786 Mld->getMemoryVT(), Mld->getMemOperand(),
39789 SDValue SlicedVec = DAG.getBitcast(WideVecVT, WideLd);
39790 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39791 for (unsigned i = 0; i != NumElems; ++i)
39792 ShuffleVec[i * SizeRatio] = i;
39794 // Can't shuffle using an illegal type.
39795 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
39796 "WideVecVT should be legal");
39797 SlicedVec = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
39798 DAG.getUNDEF(WideVecVT), ShuffleVec);
39799 SlicedVec = DAG.getBitcast(VT, SlicedVec);
39801 return DCI.CombineTo(N, SlicedVec, WideLd.getValue(1), true);
39804 /// If exactly one element of the mask is set for a non-truncating masked store,
39805 /// it is a vector extract and scalar store.
39806 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
39807 /// mask have already been optimized in IR, so we don't bother with those here.
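/// For example (illustrative): a masked store of <4 x i32> with mask
/// <0,1,0,0> becomes an EXTRACT_VECTOR_ELT of element 1 and a plain scalar
/// store at offset 4 from the base pointer.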
39808 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
39809 SelectionDAG &DAG) {
39810 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
39811 // However, some target hooks may need to be added to know when the transform
39812 // is profitable. Endianness would also have to be considered.
39814 SDValue Addr, VecIndex;
39815 unsigned Alignment;
39816 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
39819 // Extract the one scalar element that is actually being stored.
39821 EVT VT = MS->getValue().getValueType();
39822 EVT EltVT = VT.getVectorElementType();
39823 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
39824 MS->getValue(), VecIndex);
39826 // Store that element at the appropriate offset from the base pointer.
39827 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
39828 Alignment, MS->getMemOperand()->getFlags());
39831 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
39832 TargetLowering::DAGCombinerInfo &DCI,
39833 const X86Subtarget &Subtarget) {
39834 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
39835 if (Mst->isCompressingStore())
39838 EVT VT = Mst->getValue().getValueType();
39839 EVT StVT = Mst->getMemoryVT();
39841 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39843 if (!Mst->isTruncatingStore()) {
39844 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
39845 return ScalarStore;
39847 // If the mask value has been legalized to a non-boolean vector, try to
39848 // simplify ops leading up to it. We only demand the MSB of each lane.
39849 SDValue Mask = Mst->getMask();
39850 if (Mask.getScalarValueSizeInBits() != 1) {
39851 APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
39852 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
39853 return SDValue(N, 0);
39856 // TODO: AVX512 targets should also be able to simplify something like the
39857 // pattern above, but that pattern will be different. It will either need to
39858 // match setcc more generally or match PCMPGTM later (in tablegen?).
39860 SDValue Value = Mst->getValue();
39861 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
39862 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
39863 Mst->getMemoryVT())) {
39864 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
39865 Mst->getBasePtr(), Mask,
39866 Mst->getMemoryVT(), Mst->getMemOperand(), true);
39872 // Resolve truncating stores.
39873 unsigned NumElems = VT.getVectorNumElements();
39875 assert(StVT != VT && "Cannot truncate to the same type");
39876 unsigned FromSz = VT.getScalarSizeInBits();
39877 unsigned ToSz = StVT.getScalarSizeInBits();
39879 // The truncating store is legal in some cases. For example
39880 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
39881 // are designated for truncate store.
39882 // In this case we don't need any further transformations.
39883 if (TLI.isTruncStoreLegal(VT, StVT))
39886 // From/To sizes and ElemCount must be powers of two.
39887 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
39888 "Unexpected size for truncating masked store");
39889 // We are going to use the original vector elt for storing.
39890 // Accumulated smaller vector elements must be a multiple of the store size.
39891 assert (((NumElems * FromSz) % ToSz) == 0 &&
39892 "Unexpected ratio for truncating masked store");
39894 unsigned SizeRatio = FromSz / ToSz;
39895 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
39897 // Create a type on which we perform the shuffle.
39898 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
39899 StVT.getScalarType(), NumElems*SizeRatio);
39901 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
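// Bitcast the value to the narrow-element type and gather the low (truncated) part of
// each original element into the first NumElems lanes of the wide vector.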
39903 SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
39904 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
39905 for (unsigned i = 0; i != NumElems; ++i)
39906 ShuffleVec[i] = i * SizeRatio;
39908 // Can't shuffle using an illegal type.
39909 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
39910 "WideVecVT should be legal");
39912 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
39913 DAG.getUNDEF(WideVecVT),
39916 SDValue NewMask;
39917 SDValue Mask = Mst->getMask();
39918 if (Mask.getValueType() == VT) {
39919 // Mask and original value have the same type.
39920 NewMask = DAG.getBitcast(WideVecVT, Mask);
39921 for (unsigned i = 0; i != NumElems; ++i)
39922 ShuffleVec[i] = i * SizeRatio;
39923 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
39924 ShuffleVec[i] = NumElems*SizeRatio;
39925 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
39926 DAG.getConstant(0, dl, WideVecVT),
39929 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
39930 unsigned WidenNumElts = NumElems*SizeRatio;
39931 unsigned MaskNumElts = VT.getVectorNumElements();
39932 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
39935 unsigned NumConcat = WidenNumElts / MaskNumElts;
39936 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
39937 SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
39938 Ops[0] = Mask;
39939 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
39942 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
39943 Mst->getBasePtr(), NewMask, StVT,
39944 Mst->getMemOperand(), false);
39947 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
39948 TargetLowering::DAGCombinerInfo &DCI,
39949 const X86Subtarget &Subtarget) {
39950 StoreSDNode *St = cast<StoreSDNode>(N);
39951 EVT VT = St->getValue().getValueType();
39952 EVT StVT = St->getMemoryVT();
39954 unsigned Alignment = St->getAlignment();
39955 SDValue StoredVal = St->getOperand(1);
39956 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39958 // Convert a store of vXi1 into a store of iX and a bitcast.
39959 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
39960 VT.getVectorElementType() == MVT::i1) {
39962 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
39963 StoredVal = DAG.getBitcast(NewVT, StoredVal);
39965 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
39966 St->getPointerInfo(), St->getAlignment(),
39967 St->getMemOperand()->getFlags());
39970 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
39971 // This will avoid a copy to k-register.
39972 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
39973 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39974 StoredVal.getOperand(0).getValueType() == MVT::i8) {
39975 return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
39976 St->getBasePtr(), St->getPointerInfo(),
39977 St->getAlignment(), St->getMemOperand()->getFlags());
39980 // Widen v2i1/v4i1 stores to v8i1.
39981 if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
39982 Subtarget.hasAVX512()) {
39983 unsigned NumConcats = 8 / VT.getVectorNumElements();
39984 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
39985 Ops[0] = StoredVal;
39986 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
39987 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
39988 St->getPointerInfo(), St->getAlignment(),
39989 St->getMemOperand()->getFlags());
39992 // Turn vXi1 stores of constants into a scalar store.
39993 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
39994 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
39995 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
39996 // If it's a v64i1 store without 64-bit support, we need two stores.
39997 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
39998 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
39999 StoredVal->ops().slice(0, 32));
40000 Lo = combinevXi1ConstantToInteger(Lo, DAG);
40001 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
40002 StoredVal->ops().slice(32, 32));
40003 Hi = combinevXi1ConstantToInteger(Hi, DAG);
40005 SDValue Ptr0 = St->getBasePtr();
40006 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
40009 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
40010 Alignment, St->getMemOperand()->getFlags());
40012 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
40013 St->getPointerInfo().getWithOffset(4),
40014 MinAlign(Alignment, 4U),
40015 St->getMemOperand()->getFlags());
40016 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
40019 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
40020 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
40021 St->getPointerInfo(), St->getAlignment(),
40022 St->getMemOperand()->getFlags());
40025 // If we are saving a concatenation of two XMM registers and 32-byte stores
40026 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
40028 if (VT.is256BitVector() && StVT == VT &&
40029 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
40030 *St->getMemOperand(), &Fast) &&
40032 unsigned NumElems = VT.getVectorNumElements();
40036 return splitVectorStore(St, DAG);
40039 // Split under-aligned vector non-temporal stores.
40040 if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
40041 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
40042 // vectors or the legalizer can scalarize it to use MOVNTI.
40043 if (VT.is256BitVector() || VT.is512BitVector()) {
40044 unsigned NumElems = VT.getVectorNumElements();
40047 return splitVectorStore(St, DAG);
40050 // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64 stores.
40052 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
40053 MVT NTVT = Subtarget.hasSSE4A()
40055 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
40056 return scalarizeVectorStore(St, NTVT, DAG);
40060 // Try to optimize v16i16->v16i8 truncating stores when BWI is not
40061 // supported but AVX512F is, by extending to v16i32 and then truncating.
40062 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
40063 St->getValue().getOpcode() == ISD::TRUNCATE &&
40064 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
40065 TLI.isTruncStoreLegalOrCustom(MVT::v16i32, MVT::v16i8) &&
40066 !DCI.isBeforeLegalizeOps()) {
40067 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
40068 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
40069 MVT::v16i8, St->getMemOperand());
40072 // Optimize trunc store (of multiple scalars) to shuffle and store.
40073 // First, pack all of the elements in one place. Next, store to memory
40074 // in fewer chunks.
40075 if (St->isTruncatingStore() && VT.isVector()) {
40076 // Check if we can detect an AVG pattern from the truncation. If yes,
40077 // replace the trunc store by a normal store with the result of X86ISD::AVG
40079 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
40081 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
40082 St->getPointerInfo(), St->getAlignment(),
40083 St->getMemOperand()->getFlags());
40086 detectAVX512SSatPattern(St->getValue(), St->getMemoryVT(), Subtarget,
40088 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
40089 dl, Val, St->getBasePtr(),
40090 St->getMemoryVT(), St->getMemOperand(), DAG);
40091 if (SDValue Val = detectAVX512USatPattern(St->getValue(), St->getMemoryVT(),
40092 DAG, dl, Subtarget, TLI))
40093 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
40094 dl, Val, St->getBasePtr(),
40095 St->getMemoryVT(), St->getMemOperand(), DAG);
40097 unsigned NumElems = VT.getVectorNumElements();
40098 assert(StVT != VT && "Cannot truncate to the same type");
40099 unsigned FromSz = VT.getScalarSizeInBits();
40100 unsigned ToSz = StVT.getScalarSizeInBits();
40102 // The truncating store is legal in some cases. For example
40103 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
40104 // are designated for truncate store.
40105 // In this case we don't need any further transformations.
40106 if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
40109 // From/To sizes and ElemCount must be powers of two.
40110 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
40111 // We are going to use the original vector elt for storing.
40112 // Accumulated smaller vector elements must be a multiple of the store size.
40113 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
40115 unsigned SizeRatio = FromSz / ToSz;
40117 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
40119 // Create a type on which we perform the shuffle
40120 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
40121 StVT.getScalarType(), NumElems*SizeRatio);
40123 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
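// Pack the truncated low parts of each element into the first NumElems lanes of the
// wide vector; the packed data can then be written with fewer, wider stores.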
40125 SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
40126 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
40127 for (unsigned i = 0; i != NumElems; ++i)
40128 ShuffleVec[i] = i * SizeRatio;
40130 // Can't shuffle using an illegal type.
40131 if (!TLI.isTypeLegal(WideVecVT))
40134 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
40135 DAG.getUNDEF(WideVecVT),
40137 // At this point all of the data is stored at the bottom of the
40138 // register. We now need to save it to mem.
40140 // Find the largest store unit
40141 MVT StoreType = MVT::i8;
40142 for (MVT Tp : MVT::integer_valuetypes()) {
40143 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
40144 StoreType = Tp;
40145 }
40147 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
40148 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
40149 (64 <= NumElems * ToSz))
40150 StoreType = MVT::f64;
40152 // Bitcast the original vector into a vector of store-size units
40153 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
40154 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
40155 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
40156 SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
40157 SmallVector<SDValue, 8> Chains;
40158 SDValue Ptr = St->getBasePtr();
40160 // Perform one or more big stores into memory.
40161 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
40162 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
40163 StoreType, ShuffWide,
40164 DAG.getIntPtrConstant(i, dl));
40166 DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
40167 St->getAlignment(), St->getMemOperand()->getFlags());
40168 Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
40169 Chains.push_back(Ch);
40172 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
40175 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
40176 // the FP state in cases where an emms may be missing.
40177 // A preferable solution to the general problem is to figure out the right
40178 // places to insert EMMS. This qualifies as a quick hack.
40180 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
40181 if (VT.getSizeInBits() != 64)
40184 const Function &F = DAG.getMachineFunction().getFunction();
40185 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
40186 bool F64IsLegal =
40187 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
40188 if (((VT.isVector() && !VT.isFloatingPoint()) ||
40189 (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
40190 isa<LoadSDNode>(St->getValue()) &&
40191 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
40192 St->getChain().hasOneUse() && !St->isVolatile()) {
40193 LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
40194 SmallVector<SDValue, 8> Ops;
40196 if (!ISD::isNormalLoad(Ld))
40199 // If this is not the MMX case, i.e. we are just turning i64 load/store
40200 // into f64 load/store, avoid the transformation if there are multiple
40201 // uses of the loaded value.
40202 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
40207 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
40208 // Otherwise, if it's legal to use f64 SSE instructions, use an f64 load/store pair instead.
40210 if (Subtarget.is64Bit() || F64IsLegal) {
40211 MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
40212 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
40213 Ld->getMemOperand());
40215 // Make sure new load is placed in same chain order.
40216 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
40217 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
40218 St->getMemOperand());
40221 // Otherwise, lower to two pairs of 32-bit loads / stores.
40222 SDValue LoAddr = Ld->getBasePtr();
40223 SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
40225 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
40226 Ld->getPointerInfo(), Ld->getAlignment(),
40227 Ld->getMemOperand()->getFlags());
40228 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
40229 Ld->getPointerInfo().getWithOffset(4),
40230 MinAlign(Ld->getAlignment(), 4),
40231 Ld->getMemOperand()->getFlags());
40232 // Make sure new loads are placed in same chain order.
40233 DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
40234 DAG.makeEquivalentMemoryOrdering(Ld, HiLd);
40236 LoAddr = St->getBasePtr();
40237 HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
40240 DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
40241 St->getAlignment(), St->getMemOperand()->getFlags());
40242 SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
40243 St->getPointerInfo().getWithOffset(4),
40244 MinAlign(St->getAlignment(), 4),
40245 St->getMemOperand()->getFlags());
40246 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
40249 // This is similar to the above case, but here we handle a scalar 64-bit
40250 // integer store that is extracted from a vector on a 32-bit target.
40251 // If we have SSE2, then we can treat it like a floating-point double
40252 // to get past legalization. The execution dependencies fixup pass will
40253 // choose the optimal machine instruction for the store if this really is
40254 // an integer or v2f32 rather than an f64.
40255 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
40256 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
40257 SDValue OldExtract = St->getOperand(1);
40258 SDValue ExtOp0 = OldExtract.getOperand(0);
40259 unsigned VecSize = ExtOp0.getValueSizeInBits();
40260 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
40261 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
40262 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
40263 BitCast, OldExtract.getOperand(1));
40264 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
40265 St->getPointerInfo(), St->getAlignment(),
40266 St->getMemOperand()->getFlags());
40272 /// Return 'true' if this vector operation is "horizontal"
40273 /// and return the operands for the horizontal operation in LHS and RHS. A
40274 /// horizontal operation performs the binary operation on successive elements
40275 /// of its first operand, then on successive elements of its second operand,
40276 /// returning the resulting values in a vector. For example, if
40277 /// A = < float a0, float a1, float a2, float a3 >
40279 /// B = < float b0, float b1, float b2, float b3 >
40280 /// then the result of doing a horizontal operation on A and B is
40281 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
40282 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
40283 /// A horizontal-op B, for some already available A and B, and if so then LHS is
40284 /// set to A, RHS to B, and the routine returns 'true'.
40285 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
40286 const X86Subtarget &Subtarget,
40287 bool IsCommutative) {
40288 // If either operand is undef, bail out. The binop should be simplified.
40289 if (LHS.isUndef() || RHS.isUndef())
40292 // Look for the following pattern:
40293 // A = < float a0, float a1, float a2, float a3 >
40294 // B = < float b0, float b1, float b2, float b3 >
40296 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
40297 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
40298 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
40299 // which is A horizontal-op B.
40301 MVT VT = LHS.getSimpleValueType();
40302 assert((VT.is128BitVector() || VT.is256BitVector()) &&
40303 "Unsupported vector type for horizontal add/sub");
40304 unsigned NumElts = VT.getVectorNumElements();
40306 // TODO - can we make a general helper method that does all of this for us?
40307 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
40308 SmallVectorImpl<int> &ShuffleMask) {
40309 if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
40310 if (!Op.getOperand(0).isUndef())
40311 N0 = Op.getOperand(0);
40312 if (!Op.getOperand(1).isUndef())
40313 N1 = Op.getOperand(1);
40314 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
40315 ShuffleMask.append(Mask.begin(), Mask.end());
40318 bool UseSubVector = false;
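// If Op extracts the low 128-bit half of a 256-bit vector, look through the extract
// and remember to take only the low subvector of the shuffle sources later.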
40319 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40320 Op.getOperand(0).getValueType().is256BitVector() &&
40321 llvm::isNullConstant(Op.getOperand(1))) {
40322 Op = Op.getOperand(0);
40323 UseSubVector = true;
40326 SmallVector<SDValue, 2> SrcOps;
40327 SmallVector<int, 16> SrcShuffleMask;
40328 SDValue BC = peekThroughBitcasts(Op);
40329 if (isTargetShuffle(BC.getOpcode()) &&
40330 getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
40331 SrcOps, SrcShuffleMask, IsUnary)) {
40332 if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
40333 SrcOps.size() <= 2) {
40334 N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
40335 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
40336 ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
40338 if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
40339 SrcOps.size() == 1) {
40340 N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
40341 N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
40342 ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
40343 ShuffleMask.append(Mask.begin(), Mask.end());
40348 // View LHS in the form
40349 // LHS = VECTOR_SHUFFLE A, B, LMask
40350 // If LHS is not a shuffle, then pretend it is the identity shuffle:
40351 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
40352 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
40354 SmallVector<int, 16> LMask;
40355 GetShuffle(LHS, A, B, LMask);
40357 // Likewise, view RHS in the form
40358 // RHS = VECTOR_SHUFFLE C, D, RMask
40360 SmallVector<int, 16> RMask;
40361 GetShuffle(RHS, C, D, RMask);
40363 // At least one of the operands should be a vector shuffle.
40364 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
40365 if (NumShuffles == 0)
40368 if (LMask.empty()) {
40370 for (unsigned i = 0; i != NumElts; ++i)
40371 LMask.push_back(i);
40374 if (RMask.empty()) {
40376 for (unsigned i = 0; i != NumElts; ++i)
40377 RMask.push_back(i);
40380 // If A and B occur in reverse order in RHS, then canonicalize by commuting
40381 // RHS operands and shuffle mask.
40384 ShuffleVectorSDNode::commuteMask(RMask);
40386 // Check that the shuffles are both shuffling the same vectors.
40387 if (!(A == C && B == D))
40390 // LHS and RHS are now:
40391 // LHS = shuffle A, B, LMask
40392 // RHS = shuffle A, B, RMask
40393 // Check that the masks correspond to performing a horizontal operation.
40394 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
40395 // so we just repeat the inner loop if this is a 256-bit op.
40396 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
40397 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
40398 assert((NumEltsPer128BitChunk % 2 == 0) &&
40399 "Vector type should have an even number of elements in each lane");
40400 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
40401 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
40402 // Ignore undefined components.
40403 int LIdx = LMask[i + j], RIdx = RMask[i + j];
40404 if (LIdx < 0 || RIdx < 0 ||
40405 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
40406 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
40409 // The low half of the 128-bit result must choose from A.
40410 // The high half of the 128-bit result must choose from B,
40411 // unless B is undef. In that case, we are always choosing from A.
40412 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
40413 unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
40415 // Check that successive elements are being operated on. If not, this is
40416 // not a horizontal operation.
40417 int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
40418 if (!(LIdx == Index && RIdx == Index + 1) &&
40419 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
40424 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
40425 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
40427 if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
40430 LHS = DAG.getBitcast(VT, LHS);
40431 RHS = DAG.getBitcast(VT, RHS);
40435 /// Do target-specific dag combines on floating-point adds/subs.
40436 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
40437 const X86Subtarget &Subtarget) {
40438 EVT VT = N->getValueType(0);
40439 SDValue LHS = N->getOperand(0);
40440 SDValue RHS = N->getOperand(1);
40441 bool IsFadd = N->getOpcode() == ISD::FADD;
40442 auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
40443 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
40445 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
40446 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
40447 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
40448 isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
40449 return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
40454 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify the codegen.
40456 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
40457 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
40458 /// anything that is guaranteed to be transformed by DAGCombiner.
40459 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
40460 const X86Subtarget &Subtarget,
40461 const SDLoc &DL) {
40462 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
40463 SDValue Src = N->getOperand(0);
40464 unsigned SrcOpcode = Src.getOpcode();
40465 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40467 EVT VT = N->getValueType(0);
40468 EVT SrcVT = Src.getValueType();
40470 auto IsFreeTruncation = [VT](SDValue Op) {
40471 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
40473 // See if this has been extended from a smaller/equal size to
40474 // the truncation size, allowing a truncation to combine with the extend.
40475 unsigned Opcode = Op.getOpcode();
40476 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
40477 Opcode == ISD::ZERO_EXTEND) &&
40478 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
40481 // See if this is a single use constant which can be constant folded.
40482 // NOTE: We don't peek through bitcasts here because there is currently
40483 // no support for constant folding truncate+bitcast+vector_of_constants. So
40484 // we'll just end up with a truncate on both operands which will
40485 // get turned back into (truncate (binop)) causing an infinite loop.
40486 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
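// Helper that rebuilds the binop in the narrow type: truncate both operands and
// re-emit SrcOpcode on the truncated values.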
40489 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
40490 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
40491 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
40492 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
40495 // Don't combine if the operation has other uses.
40496 if (!Src.hasOneUse())
40499 // Only support vector truncation for now.
40500 // TODO: i64 scalar math would benefit as well.
40501 if (!VT.isVector())
40504 // In most cases it's only worth pre-truncating if we're only facing the cost
40505 // of one truncation.
40506 // i.e. if one of the inputs will constant fold or the input is repeated.
40507 switch (SrcOpcode) {
40511 SDValue Op0 = Src.getOperand(0);
40512 SDValue Op1 = Src.getOperand(1);
40513 if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
40514 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
40515 return TruncateArithmetic(Op0, Op1);
40520 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
40521 // better to truncate if we have the chance.
40522 if (SrcVT.getScalarType() == MVT::i64 &&
40523 TLI.isOperationLegal(SrcOpcode, VT) &&
40524 !TLI.isOperationLegal(SrcOpcode, SrcVT))
40525 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
40528 SDValue Op0 = Src.getOperand(0);
40529 SDValue Op1 = Src.getOperand(1);
40530 if (TLI.isOperationLegal(SrcOpcode, VT) &&
40531 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
40532 return TruncateArithmetic(Op0, Op1);
40536 // TODO: ISD::SUB We are conservative and require both sides to be freely
40537 // truncatable to avoid interfering with combineSubToSubus.
40538 SDValue Op0 = Src.getOperand(0);
40539 SDValue Op1 = Src.getOperand(1);
40540 if (TLI.isOperationLegal(SrcOpcode, VT) &&
40541 (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
40542 return TruncateArithmetic(Op0, Op1);
40550 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
40551 /// e.g. trunc <8 x i32> X to <8 x i16> -->
40552 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
40553 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
40554 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
40555 const X86Subtarget &Subtarget,
40556 SelectionDAG &DAG) {
40557 SDValue In = N->getOperand(0);
40558 EVT InVT = In.getValueType();
40559 EVT OutVT = N->getValueType(0);
40561 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
40562 OutVT.getScalarSizeInBits());
40563 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
40564 return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
40567 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
40568 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
40569 const X86Subtarget &Subtarget,
40570 SelectionDAG &DAG) {
40571 SDValue In = N->getOperand(0);
40572 EVT InVT = In.getValueType();
40573 EVT OutVT = N->getValueType(0);
40574 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
40575 DAG.getValueType(OutVT));
40576 return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
40579 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
40580 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
40581 /// legalization the truncation will be translated into a BUILD_VECTOR with each
40582 /// element that is extracted from a vector and then truncated, and it is
40583 /// difficult to do this optimization based on them.
40584 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
40585 const X86Subtarget &Subtarget) {
40586 EVT OutVT = N->getValueType(0);
40587 if (!OutVT.isVector())
40590 SDValue In = N->getOperand(0);
40591 if (!In.getValueType().isSimple())
40594 EVT InVT = In.getValueType();
40595 unsigned NumElems = OutVT.getVectorNumElements();
40597 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
40598 // SSE2, and we need to take care of it specially.
40599 // AVX512 provides vpmovdb.
40600 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
40603 EVT OutSVT = OutVT.getVectorElementType();
40604 EVT InSVT = InVT.getVectorElementType();
40605 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
40606 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
40611 // SSSE3's pshufb results in fewer instructions in the cases below.
40611 if (Subtarget.hasSSSE3() && NumElems == 8 &&
40612 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
40613 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
40617 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
40618 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
40619 // truncate 2 x v4i32 to v8i16.
40620 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
40621 return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
40622 if (InSVT == MVT::i32)
40623 return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
40628 /// This function transforms a vector truncation of 'extended sign-bits' or
40629 /// 'extended zero-bits' values
40630 /// (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into X86ISD::PACKSS/PACKUS operations.
40631 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
40633 const X86Subtarget &Subtarget) {
40634 // Requires SSE2 but AVX512 has fast truncate.
40635 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
40638 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
40641 SDValue In = N->getOperand(0);
40642 if (!In.getValueType().isSimple())
40645 MVT VT = N->getValueType(0).getSimpleVT();
40646 MVT SVT = VT.getScalarType();
40648 MVT InVT = In.getValueType().getSimpleVT();
40649 MVT InSVT = InVT.getScalarType();
40651 // Check we have a truncation suited for PACKSS/PACKUS.
40652 if (!VT.is128BitVector() && !VT.is256BitVector())
40654 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
40656 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
40659 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
40660 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
40662 // Use PACKUS if the input has zero-bits that extend all the way to the
40663 // packed/truncated value. e.g. masks, zext_in_reg, etc.
40664 KnownBits Known = DAG.computeKnownBits(In);
40665 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
40666 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
40667 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
40669 // Use PACKSS if the input has sign-bits that extend all the way to the
40670 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
40671 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
40672 if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
40673 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
40678 // Try to form a MULHU or MULHS node by looking for
40679 // (trunc (srl (mul ext, ext), 16))
40680 // TODO: This is X86 specific because we want to be able to handle wide types
40681 // before type legalization. But we can only do it if the vector will be
40682 // legalized via widening/splitting. Type legalization can't handle promotion
40683 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG combiner.
40685 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
40686 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
40687 // First instruction should be a right shift of a multiply.
40688 if (Src.getOpcode() != ISD::SRL ||
40689 Src.getOperand(0).getOpcode() != ISD::MUL)
40692 if (!Subtarget.hasSSE2())
40695 // Only handle vXi16 types that are at least 128 bits unless they will be widened.
40697 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
40698 (!ExperimentalVectorWideningLegalization &&
40699 VT.getVectorNumElements() < 8))
40702 // Input type should be vXi32.
40703 EVT InVT = Src.getValueType();
40704 if (InVT.getVectorElementType() != MVT::i32)
40707 // Need a shift by 16.
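// (shifting the 32-bit product right by 16 leaves exactly the MULHS/MULHU result
// of the original 16-bit operands).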
40708 APInt ShiftAmt;
40709 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
40713 SDValue LHS = Src.getOperand(0).getOperand(0);
40714 SDValue RHS = Src.getOperand(0).getOperand(1);
40716 unsigned ExtOpc = LHS.getOpcode();
40717 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
40718 RHS.getOpcode() != ExtOpc)
40721 // Peek through the extends.
40722 LHS = LHS.getOperand(0);
40723 RHS = RHS.getOperand(0);
40725 // Ensure the input types match.
40726 if (LHS.getValueType() != VT || RHS.getValueType() != VT)
40729 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
40730 return DAG.getNode(Opc, DL, VT, LHS, RHS);
40733 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
40734 // from one vector with signed bytes from another vector, adds together
40735 // adjacent pairs of 16-bit products, and saturates the result before
40736 // truncating to 16-bits.
40738 // Which looks something like this:
40739 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
40740 // (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
40741 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
40742 const X86Subtarget &Subtarget,
40744 if (!VT.isVector() || !Subtarget.hasSSSE3())
40747 unsigned NumElems = VT.getVectorNumElements();
40748 EVT ScalarVT = VT.getVectorElementType();
40749 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
40752 SDValue SSatVal = detectSSatPattern(In, VT);
40753 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
40756 // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
40757 // of multiplies from even/odd elements.
40758 SDValue N0 = SSatVal.getOperand(0);
40759 SDValue N1 = SSatVal.getOperand(1);
40761 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
40764 SDValue N00 = N0.getOperand(0);
40765 SDValue N01 = N0.getOperand(1);
40766 SDValue N10 = N1.getOperand(0);
40767 SDValue N11 = N1.getOperand(1);
40769 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
40770 // Canonicalize zero_extend to LHS.
40771 if (N01.getOpcode() == ISD::ZERO_EXTEND)
40772 std::swap(N00, N01);
40773 if (N11.getOpcode() == ISD::ZERO_EXTEND)
40774 std::swap(N10, N11);
40776 // Ensure we have a zero_extend and a sign_extend.
40777 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
40778 N01.getOpcode() != ISD::SIGN_EXTEND ||
40779 N10.getOpcode() != ISD::ZERO_EXTEND ||
40780 N11.getOpcode() != ISD::SIGN_EXTEND)
40783 // Peek through the extends.
40784 N00 = N00.getOperand(0);
40785 N01 = N01.getOperand(0);
40786 N10 = N10.getOperand(0);
40787 N11 = N11.getOperand(0);
40789 // Ensure the extend is from vXi8.
40790 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
40791 N01.getValueType().getVectorElementType() != MVT::i8 ||
40792 N10.getValueType().getVectorElementType() != MVT::i8 ||
40793 N11.getValueType().getVectorElementType() != MVT::i8)
40796 // All inputs should be build_vectors.
40797 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
40798 N01.getOpcode() != ISD::BUILD_VECTOR ||
40799 N10.getOpcode() != ISD::BUILD_VECTOR ||
40800 N11.getOpcode() != ISD::BUILD_VECTOR)
40803 // N00/N10 are zero extended. N01/N11 are sign extended.
40805 // For each element, we need to ensure we have an odd element from one vector
40806 // multiplied by the odd element of another vector and the even element from
40807 // one of the same vectors being multiplied by the even element from the
40808 // other vector. So we need to make sure for each element i, this operator
40809 // is being performed:
40810 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
40811 SDValue ZExtIn, SExtIn;
40812 for (unsigned i = 0; i != NumElems; ++i) {
40813 SDValue N00Elt = N00.getOperand(i);
40814 SDValue N01Elt = N01.getOperand(i);
40815 SDValue N10Elt = N10.getOperand(i);
40816 SDValue N11Elt = N11.getOperand(i);
40817 // TODO: Be more tolerant to undefs.
40818 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
40819 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
40820 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
40821 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
40823 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
40824 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
40825 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
40826 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
40827 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
40829 unsigned IdxN00 = ConstN00Elt->getZExtValue();
40830 unsigned IdxN01 = ConstN01Elt->getZExtValue();
40831 unsigned IdxN10 = ConstN10Elt->getZExtValue();
40832 unsigned IdxN11 = ConstN11Elt->getZExtValue();
40833 // Add is commutative so indices can be reordered.
40834 if (IdxN00 > IdxN10) {
40835 std::swap(IdxN00, IdxN10);
40836 std::swap(IdxN01, IdxN11);
40838 // N0 indices must be the even element. N1 indices must be the next odd element.
40839 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
40840 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
40842 SDValue N00In = N00Elt.getOperand(0);
40843 SDValue N01In = N01Elt.getOperand(0);
40844 SDValue N10In = N10Elt.getOperand(0);
40845 SDValue N11In = N11Elt.getOperand(0);
40846 // The first time we find an input, capture it.
40851 if (ZExtIn != N00In || SExtIn != N01In ||
40852 ZExtIn != N10In || SExtIn != N11In)
40856 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
40857 ArrayRef<SDValue> Ops) {
40858 // Shrink by adding truncate nodes and let DAGCombine fold with the sources.
40860 EVT InVT = Ops[0].getValueType();
40861 assert(InVT.getScalarType() == MVT::i8 &&
40862 "Unexpected scalar element type");
40863 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
40864 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
40865 InVT.getVectorNumElements() / 2);
40866 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
40868 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
40872 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
40873 const X86Subtarget &Subtarget) {
40874 EVT VT = N->getValueType(0);
40875 SDValue Src = N->getOperand(0);
40878 // Attempt to pre-truncate inputs to arithmetic ops instead.
40879 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
40882 // Try to detect AVG pattern first.
40883 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
40886 // Try to detect PMADD
40887 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
40890 // Try to combine truncation with signed/unsigned saturation.
40891 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
40894 // Try to combine PMULHUW/PMULHW for vXi16.
40895 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
40898 // The bitcast source is a direct mmx result.
40900 // Detect a truncation of a bitcast from x86mmx.
40900 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
40901 SDValue BCSrc = Src.getOperand(0);
40902 if (BCSrc.getValueType() == MVT::x86mmx)
40903 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
40906 // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
40907 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
40910 return combineVectorTruncation(N, DAG, Subtarget);
40913 /// Returns the negated value if the node \p N flips sign of FP value.
40915 /// An FP-negation node may have different forms: FNEG(x), FXOR(x, 0x80000000), or FSUB(-0.0, x).
40917 /// AVX512F does not have FXOR, so FNEG is lowered as
40918 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
40919 /// In this case we go through all bitcasts.
40920 /// This also recognizes splats of a negated value and returns the splat of that value.
40922 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N) {
40923 if (N->getOpcode() == ISD::FNEG)
40924 return N->getOperand(0);
40926 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
40928 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
40929 EVT VT = Op->getValueType(0);
40930 // Make sure the element size doesn't change.
40931 if (VT.getScalarSizeInBits() != ScalarSize)
40934 if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) {
40935 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
40936 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
40937 if (!SVOp->getOperand(1).isUndef())
40939 if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode()))
40940 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
40941 return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT),
40945 unsigned Opc = Op.getOpcode();
40946 if (Opc == ISD::INSERT_VECTOR_ELT) {
40947 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF, -V, INDEX).
40949 SDValue InsVector = Op.getOperand(0);
40950 SDValue InsVal = Op.getOperand(1);
40951 if (!InsVector.isUndef())
40953 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode()))
40954 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
40955 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
40956 NegInsVal, Op.getOperand(2));
40960 if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB)
40963 SDValue Op1 = Op.getOperand(1);
40964 SDValue Op0 = Op.getOperand(0);
40966 // For XOR and FXOR, we want to check if constant bits of Op1 are sign bit
40967 // masks. For FSUB, we have to check if constant bits of Op0 are sign bit
40968 // masks and hence we swap the operands.
40969 if (Opc == ISD::FSUB)
40970 std::swap(Op0, Op1);
40973 SmallVector<APInt, 16> EltBits;
40974 // Extract constant bits and see if they are all sign bit masks. Ignore the undef elements.
40976 if (getTargetConstantBitsFromNode(Op1, ScalarSize,
40977 UndefElts, EltBits,
40978 /* AllowWholeUndefs */ true,
40979 /* AllowPartialUndefs */ false)) {
40980 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
40981 if (!UndefElts[I] && !EltBits[I].isSignMask())
40984 return peekThroughBitcasts(Op0);
40990 /// Do target-specific dag combines on floating point negations.
40991 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
40992 const X86Subtarget &Subtarget) {
40993 EVT OrigVT = N->getValueType(0);
40994 SDValue Arg = isFNEG(DAG, N);
40998 EVT VT = Arg.getValueType();
40999 EVT SVT = VT.getScalarType();
41002 // Let legalize expand this if it isn't a legal type yet.
41003 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
41006 // If we're negating an FMUL node on a target with FMA, then we can avoid the
41007 // use of a constant by performing (-0 - A*B) instead.
41008 // FIXME: Check rounding control flags as well once they become available.
41009 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
41010 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
41011 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
41012 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
41013 Arg.getOperand(1), Zero);
41014 return DAG.getBitcast(OrigVT, NewNode);
41017 // If we're negating an FMA node, then we can adjust the
41018 // instruction to include the extra negation.
41019 unsigned NewOpcode = 0;
41020 if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
41021 switch (Arg.getOpcode()) {
41022 case ISD::FMA: NewOpcode = X86ISD::FNMSUB; break;
41023 case X86ISD::FMSUB: NewOpcode = X86ISD::FNMADD; break;
41024 case X86ISD::FNMADD: NewOpcode = X86ISD::FMSUB; break;
41025 case X86ISD::FNMSUB: NewOpcode = ISD::FMA; break;
41026 case X86ISD::FMADD_RND: NewOpcode = X86ISD::FNMSUB_RND; break;
41027 case X86ISD::FMSUB_RND: NewOpcode = X86ISD::FNMADD_RND; break;
41028 case X86ISD::FNMADD_RND: NewOpcode = X86ISD::FMSUB_RND; break;
41029 case X86ISD::FNMSUB_RND: NewOpcode = X86ISD::FMADD_RND; break;
41030 // We can't handle a scalar intrinsic node here because it would only
41031 // invert one element and not the whole vector. But we could try to handle
41032 // a negation of the lower element only.
41036 return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT,
41037 Arg.getNode()->ops()));
41042 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
41043 const X86Subtarget &Subtarget) {
41044 MVT VT = N->getSimpleValueType(0);
41045 // If we have integer vector types available, use the integer opcodes.
41046 if (!VT.isVector() || !Subtarget.hasSSE2())
41051 unsigned IntBits = VT.getScalarSizeInBits();
41052 MVT IntSVT = MVT::getIntegerVT(IntBits);
41053 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
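// Bitcast both FP operands to the equivalent integer vector type, perform the
// logic op there, and bitcast the result back to the original FP type.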
41055 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
41056 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
41057 unsigned IntOpcode;
41058 switch (N->getOpcode()) {
41059 default: llvm_unreachable("Unexpected FP logic op");
41060 case X86ISD::FOR: IntOpcode = ISD::OR; break;
41061 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
41062 case X86ISD::FAND: IntOpcode = ISD::AND; break;
41063 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
41065 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
41066 return DAG.getBitcast(VT, IntOp);
41070 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
41071 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
41072 if (N->getOpcode() != ISD::XOR)
41075 SDValue LHS = N->getOperand(0);
41076 auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
41077 if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
41080 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
41081 X86::CondCode(LHS->getConstantOperandVal(0)));
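// A SETCC result is 0 or 1, so xor-ing it with 1 is equivalent to inverting the
// condition; rebuild the SETCC with the opposite condition code.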
41083 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
41086 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
41087 TargetLowering::DAGCombinerInfo &DCI,
41088 const X86Subtarget &Subtarget) {
41089 // If this is SSE1-only, convert to FXOR to avoid scalarization.
41090 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
41091 N->getValueType(0) == MVT::v4i32) {
41092 return DAG.getBitcast(
41093 MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
41094 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
41095 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
41098 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
41101 if (DCI.isBeforeLegalizeOps())
41104 if (SDValue SetCC = foldXor1SetCC(N, DAG))
41107 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
41110 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
41113 return combineFneg(N, DAG, Subtarget);
41116 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
41117 TargetLowering::DAGCombinerInfo &DCI,
41118 const X86Subtarget &Subtarget) {
41119 SDValue Op0 = N->getOperand(0);
41120 SDValue Op1 = N->getOperand(1);
41121 EVT VT = N->getValueType(0);
41122 unsigned NumBits = VT.getSizeInBits();
41124 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41126 // TODO - Constant Folding.
41127 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
41128 // Reduce Cst1 to the bottom 16-bits.
41129 // NOTE: SimplifyDemandedBits won't do this for constants.
41130 const APInt &Val1 = Cst1->getAPIntValue();
41131 APInt MaskedVal1 = Val1 & 0xFFFF;
41132 if (MaskedVal1 != Val1)
41133 return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
41134 DAG.getConstant(MaskedVal1, SDLoc(N), VT));
41137 // Only the bottom 16 bits of the control are required.
41138 APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
41139 if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
41140 return SDValue(N, 0);
41145 static bool isNullFPScalarOrVectorConst(SDValue V) {
41146 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
41149 /// If a value is a scalar FP zero or a vector FP zero (potentially including
41150 /// undefined elements), return a zero constant that may be used to fold away
41151 /// that value. In the case of a vector, the returned constant will not contain
41152 /// undefined elements even if the input parameter does. This makes it suitable
41153 /// to be used as a replacement operand with operations (eg, bitwise-and) where
41154 /// an undef should not propagate.
41155 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
41156 const X86Subtarget &Subtarget) {
41157 if (!isNullFPScalarOrVectorConst(V))
41160 if (V.getValueType().isVector())
41161 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
41166 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
41167 const X86Subtarget &Subtarget) {
41168 SDValue N0 = N->getOperand(0);
41169 SDValue N1 = N->getOperand(1);
41170 EVT VT = N->getValueType(0);
41173 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
41174 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
41175 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
41176 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
41179 auto isAllOnesConstantFP = [](SDValue V) {
41180 if (V.getSimpleValueType().isVector())
41181 return ISD::isBuildVectorAllOnes(V.getNode());
41182 auto *C = dyn_cast<ConstantFPSDNode>(V);
41183 return C && C->getConstantFPValue()->isAllOnesValue();
41186 // fand (fxor X, -1), Y --> fandn X, Y
41187 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
41188 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
41190 // fand X, (fxor Y, -1) --> fandn Y, X
41191 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
41192 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
41197 /// Do target-specific dag combines on X86ISD::FAND nodes.
41198 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
41199 const X86Subtarget &Subtarget) {
41200 // FAND(0.0, x) -> 0.0
41201 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
41204 // FAND(x, 0.0) -> 0.0
41205 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
41208 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
41211 return lowerX86FPLogicOp(N, DAG, Subtarget);
41214 /// Do target-specific dag combines on X86ISD::FANDN nodes.
41215 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
41216 const X86Subtarget &Subtarget) {
41217 // FANDN(0.0, x) -> x
41218 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
41219 return N->getOperand(1);
41221 // FANDN(x, 0.0) -> 0.0
41222 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
41225 return lowerX86FPLogicOp(N, DAG, Subtarget);
41228 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
41229 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
41230 const X86Subtarget &Subtarget) {
41231 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
41233 // F[X]OR(0.0, x) -> x
41234 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
41235 return N->getOperand(1);
41237 // F[X]OR(x, 0.0) -> x
41238 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
41239 return N->getOperand(0);
41241 if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
41244 return lowerX86FPLogicOp(N, DAG, Subtarget);
41247 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
41248 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
41249 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
41251 // Only perform optimizations if UnsafeMath is used.
41252 if (!DAG.getTarget().Options.UnsafeFPMath)
41255 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
41256 // into FMINC and FMAXC, which are Commutative operations.
41257 unsigned NewOp = 0;
41258 switch (N->getOpcode()) {
41259 default: llvm_unreachable("unknown opcode");
41260 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
41261 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
41264 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
41265 N->getOperand(0), N->getOperand(1));
41268 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
41269 const X86Subtarget &Subtarget) {
41270 if (Subtarget.useSoftFloat())
41273 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41275 EVT VT = N->getValueType(0);
41276 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
41277 (Subtarget.hasSSE2() && VT == MVT::f64) ||
41278 (VT.isVector() && TLI.isTypeLegal(VT))))
41281 SDValue Op0 = N->getOperand(0);
41282 SDValue Op1 = N->getOperand(1);
41284 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
41286 // If we don't have to respect NaN inputs, this is a direct translation to x86
41287 // min/max instructions.
41288 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
41289 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
41291 // If one of the operands is known non-NaN use the native min/max instructions
41292 // with the non-NaN input as second operand.
41293 if (DAG.isKnownNeverNaN(Op1))
41294 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
41295 if (DAG.isKnownNeverNaN(Op0))
41296 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
41298 // If we have to respect NaN inputs, this takes at least 3 instructions.
41299 // Favor a library call when operating on a scalar and minimizing code size.
41300 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
41303 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
41306 // There are 4 possibilities involving NaN inputs, and these are the required
41307 // outputs:
41308 //                   Op1
41309 //               Num     NaN
41310 //            ----------------
41311 //     Num    |  Max  |  Op0 |
41312 // Op0        ----------------
41313 //     NaN    |  Op1  |  NaN |
41314 //            ----------------
41316 // The SSE FP max/min instructions were not designed for this case, but rather to implement:
41318 // Min = Op1 < Op0 ? Op1 : Op0
41319 // Max = Op1 > Op0 ? Op1 : Op0
41321 // So they always return Op0 if either input is a NaN. However, we can still
41322 // use those instructions for fmaxnum by selecting away a NaN input.
41324 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
41325 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
41326 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
41328 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
41329 // are NaN, the NaN value of Op1 is the result.
41330 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
41333 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
41334 TargetLowering::DAGCombinerInfo &DCI) {
41335 EVT VT = N->getValueType(0);
41336 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41338 APInt KnownUndef, KnownZero;
41339 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);
41344 // Convert a full vector load into vzload when not all bits are needed.
41345 SDValue In = N->getOperand(0);
41346 MVT InVT = In.getSimpleValueType();
41347 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
41348 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
41349 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
41350 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
41351 // Unless the load is volatile.
    if (!LN->isVolatile()) {
      SDLoc dl(N);
41354 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
41355 MVT MemVT = MVT::getIntegerVT(NumBits);
41356 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
41357 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
41361 LN->getPointerInfo(),
41362 LN->getAlignment(),
41363 LN->getMemOperand()->getFlags());
41364 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
41365 DAG.getBitcast(InVT, VZLoad));
41366 DCI.CombineTo(N, Convert);
41367 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return SDValue(N, 0);
    }
  }

  return SDValue();
}
41375 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
41376 TargetLowering::DAGCombinerInfo &DCI) {
41377 EVT VT = N->getValueType(0);
41379 // Convert a full vector load into vzload when not all bits are needed.
41380 SDValue In = N->getOperand(0);
41381 MVT InVT = In.getSimpleValueType();
41382 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
41383 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
41384 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
41385 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
41386 // Unless the load is volatile.
    if (!LN->isVolatile()) {
      SDLoc dl(N);
41389 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
41390 MVT MemVT = MVT::getFloatingPointVT(NumBits);
41391 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
41392 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
41396 LN->getPointerInfo(),
41397 LN->getAlignment(),
41398 LN->getMemOperand()->getFlags());
41399 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
41400 DAG.getBitcast(InVT, VZLoad));
41401 DCI.CombineTo(N, Convert);
41402 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return SDValue(N, 0);
    }
  }

  return SDValue();
}
41410 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
41411 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
41412 TargetLowering::DAGCombinerInfo &DCI,
41413 const X86Subtarget &Subtarget) {
41414 MVT VT = N->getSimpleValueType(0);
41416 // ANDNP(0, x) -> x
41417 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
41418 return N->getOperand(1);
41420 // ANDNP(x, 0) -> 0
41421 if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
41422 return DAG.getConstant(0, SDLoc(N), VT);
41424 // Turn ANDNP back to AND if input is inverted.
41425 if (SDValue Not = IsNOT(N->getOperand(0), DAG))
41426 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
41429 // Attempt to recursively combine a bitmask ANDNP with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}
41439 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
41440 TargetLowering::DAGCombinerInfo &DCI) {
41441 SDValue N0 = N->getOperand(0);
41442 SDValue N1 = N->getOperand(1);
41444 // BT ignores high bits in the bit index operand.
41445 unsigned BitWidth = N1.getValueSizeInBits();
41446 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
41447 if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
41448 return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
41453 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
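// For example (illustrative):
//   (sext_in_reg (cmov C1, C2, cc, flags), i8)
//     --> (cmov (sext_in_reg C1, i8), (sext_in_reg C2, i8), cc, flags)
// The sign extensions of the constants fold away, leaving just the CMOV.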
41454 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
41455 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
41457 EVT DstVT = N->getValueType(0);
41459 SDValue N0 = N->getOperand(0);
41460 SDValue N1 = N->getOperand(1);
41461 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
41463 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
41466 // Look through single use any_extends / truncs.
41467 SDValue IntermediateBitwidthOp;
  if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
      N0.hasOneUse()) {
    IntermediateBitwidthOp = N0;
    N0 = N0.getOperand(0);
  }
41474 // See if we have a single use cmov.
41475 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
41478 SDValue CMovOp0 = N0.getOperand(0);
41479 SDValue CMovOp1 = N0.getOperand(1);
41481 // Make sure both operands are constants.
41482 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  SDLoc DL(N);
41488 // If we looked through an any_extend/trunc above, add one to the constants.
41489 if (IntermediateBitwidthOp) {
41490 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
41491 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
41492 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
41495 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
41496 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
41498 EVT CMovVT = DstVT;
41499 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
  if (DstVT == MVT::i16) {
    CMovVT = MVT::i32;
    CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
    CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
  }
41506 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
41507 N0.getOperand(2), N0.getOperand(3));
41509 if (CMovVT != DstVT)
    CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);

  return CMov;
}
41515 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
41516 const X86Subtarget &Subtarget) {
41517 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
41519 if (SDValue V = combineSextInRegCmov(N, DAG))
41522 EVT VT = N->getValueType(0);
41523 SDValue N0 = N->getOperand(0);
41524 SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);
41528 // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the
41529 // both SSE and AVX2 since there is no sign-extended shift right
41530 // operation on a vector with 64-bit elements.
41531 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
41532 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
41533 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
41534 N0.getOpcode() == ISD::SIGN_EXTEND)) {
41535 SDValue N00 = N0.getOperand(0);
41537 // EXTLOAD has a better solution on AVX2,
41538 // it may be replaced with X86ISD::VSEXT node.
41539 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
41540 if (!ISD::isNormalLoad(N00.getNode()))
41543 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}
41552 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
41553 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
41554 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
41555 /// opportunities to combine math ops, use an LEA, or use a complex addressing
41556 /// mode. This can eliminate extend, add, and shift instructions.
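/// For example (illustrative, i32 -> i64):
///   (i64 sext (i32 add nsw X, 5)) --> (i64 add nsw (i64 sext X), 5)
/// The wider add can then fold into an LEA or a complex addressing mode.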
41557 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
41558 const X86Subtarget &Subtarget) {
41559 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
41560 Ext->getOpcode() != ISD::ZERO_EXTEND)
41563 // TODO: This should be valid for other integer types.
41564 EVT VT = Ext->getValueType(0);
41565 if (VT != MVT::i64)
41568 SDValue Add = Ext->getOperand(0);
41569 if (Add.getOpcode() != ISD::ADD)
41572 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
41573 bool NSW = Add->getFlags().hasNoSignedWrap();
41574 bool NUW = Add->getFlags().hasNoUnsignedWrap();
  // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding
  // into the 'zext'.
  if ((Sext && !NSW) || (!Sext && !NUW))
    return SDValue();
41581 // Having a constant operand to the 'add' ensures that we are not increasing
41582 // the instruction count because the constant is extended for free below.
41583 // A constant operand can also become the displacement field of an LEA.
  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
  if (!AddOp1)
    return SDValue();
41588 // Don't make the 'add' bigger if there's no hope of combining it with some
41589 // other 'add' or 'shl' instruction.
41590 // TODO: It may be profitable to generate simpler LEA instructions in place
41591 // of single 'add' instructions, but the cost model for selecting an LEA
41592 // currently has a high threshold.
41593 bool HasLEAPotential = false;
41594 for (auto *User : Ext->uses()) {
41595 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
      HasLEAPotential = true;
      break;
    }
  }
  if (!HasLEAPotential)
    return SDValue();
41603 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
41604 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
41605 SDValue AddOp0 = Add.getOperand(0);
41606 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
41607 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
  // The wider add is guaranteed to not wrap because both operands are
  // sign-extended.
  SDNodeFlags Flags;
  Flags.setNoSignedWrap(NSW);
41613 Flags.setNoUnsignedWrap(NUW);
41614 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
41617 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
41618 // operands and the result of CMOV is not used anywhere else - promote CMOV
41619 // itself instead of promoting its result. This could be beneficial, because:
41620 // 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
41621 // (or more) pseudo-CMOVs only when they go one-after-another and
41622 // getting rid of result extension code after CMOV will help that.
41623 // 2) Promotion of constant CMOV arguments is free, hence the
41624 // {ANY,SIGN,ZERO}_EXTEND will just be deleted.
41625 // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3-byte, so this
41626 // promotion is also good in terms of code-size.
// (64-bit CMOV is 4-bytes, that's why we don't do 32-bit => 64-bit
//    promotion).
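//
// For example (illustrative):
//   (i32 zext (i16 cmov C1, C2, cc, flags))
//     --> (i32 cmov (i32 zext C1), (i32 zext C2), cc, flags)
// The extends of the constant operands fold away at compile time.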
41629 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
41630 SDValue CMovN = Extend->getOperand(0);
41631 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
41634 EVT TargetVT = Extend->getValueType(0);
  unsigned ExtendOpcode = Extend->getOpcode();
  SDLoc DL(Extend);
41638 EVT VT = CMovN.getValueType();
41639 SDValue CMovOp0 = CMovN.getOperand(0);
41640 SDValue CMovOp1 = CMovN.getOperand(1);
41642 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
41643 !isa<ConstantSDNode>(CMovOp1.getNode()))
41646 // Only extend to i32 or i64.
41647 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
41650 // Only extend from i16 unless its a sign_extend from i32. Zext/aext from i32
41652 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
41655 // If this a zero extend to i64, we should only extend to i32 and use a free
41656 // zero extend to finish.
41657 EVT ExtendVT = TargetVT;
41658 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
41659 ExtendVT = MVT::i32;
41661 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
41662 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
41664 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
41665 CMovN.getOperand(2), CMovN.getOperand(3));
41667 // Finish extending if needed.
41668 if (ExtendVT != TargetVT)
    Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);

  return Res;
}
41674 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
41675 // This is more or less the reverse of combineBitcastvxi1.
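// For example (illustrative): (v8i16 zext (v8i1 bitcast (i8 X))) is rewritten
// as a broadcast of X, an AND with the per-lane masks 1,2,4,...,128, a compare
// of each lane against its mask, and a final shift/sext to form 0/1 lanes.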
static SDValue
combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
41678 TargetLowering::DAGCombinerInfo &DCI,
41679 const X86Subtarget &Subtarget) {
41680 unsigned Opcode = N->getOpcode();
41681 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
      Opcode != ISD::ANY_EXTEND)
    return SDValue();
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();
41689 SDValue N0 = N->getOperand(0);
41690 EVT VT = N->getValueType(0);
41691 EVT SVT = VT.getScalarType();
41692 EVT InSVT = N0.getValueType().getScalarType();
41693 unsigned EltSizeInBits = SVT.getSizeInBits();
41695 // Input type must be extending a bool vector (bit-casted from a scalar
41696 // integer) to legal integer types.
41697 if (!VT.isVector())
41699 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
41701 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
41704 SDValue N00 = N0.getOperand(0);
41705 EVT SclVT = N0.getOperand(0).getValueType();
  if (!SclVT.isScalarInteger())
    return SDValue();

  SDLoc DL(N);
  SDValue Vec;
  SmallVector<int, 32> ShuffleMask;
41712 unsigned NumElts = VT.getVectorNumElements();
41713 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
41715 // Broadcast the scalar integer to the vector elements.
41716 if (NumElts > EltSizeInBits) {
41717 // If the scalar integer is greater than the vector element size, then we
41718 // must split it down into sub-sections for broadcasting. For example:
41719 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
41720 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
41721 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
    unsigned Scale = NumElts / EltSizeInBits;
    EVT BroadcastVT =
        EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
41725 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
41726 Vec = DAG.getBitcast(VT, Vec);
41728 for (unsigned i = 0; i != Scale; ++i)
41729 ShuffleMask.append(EltSizeInBits, i);
41731 // For smaller scalar integers, we can simply any-extend it to the vector
41732 // element size (we don't care about the upper bits) and broadcast it to all
41734 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
41735 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
41736 ShuffleMask.append(NumElts, 0);
41738 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
41740 // Now, mask the relevant bit in each element.
41741 SmallVector<SDValue, 32> Bits;
41742 for (unsigned i = 0; i != NumElts; ++i) {
41743 int BitIdx = (i % EltSizeInBits);
41744 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
41745 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
41747 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
41748 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
41750 // Compare against the bitmask and extend the result.
41751 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
41752 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
41753 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
  // For SEXT, this is now done, otherwise shift the result down for
  // zero-extension.
  if (Opcode == ISD::SIGN_EXTEND)
    return Vec;
  return DAG.getNode(ISD::SRL, DL, VT, Vec,
                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
}
41763 /// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
41764 /// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating
41765 /// with UNDEFs) of the input to vectors of the same size as the target type
41766 /// which then extends the lowest elements.
41767 static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
41768 TargetLowering::DAGCombinerInfo &DCI,
41769 const X86Subtarget &Subtarget) {
41770 if (ExperimentalVectorWideningLegalization)
41773 unsigned Opcode = N->getOpcode();
41774 // TODO - add ANY_EXTEND support.
  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
    return SDValue();
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (!Subtarget.hasSSE2())
    return SDValue();
41782 SDValue N0 = N->getOperand(0);
41783 EVT VT = N->getValueType(0);
41784 EVT SVT = VT.getScalarType();
41785 EVT InVT = N0.getValueType();
41786 EVT InSVT = InVT.getScalarType();
41788 // FIXME: Generic DAGCombiner previously had a bug that would cause a
41789 // sign_extend of setcc to sometimes return the original node and tricked it
  // into thinking CombineTo was used which prevented the target combines from
  // running.
  // Returning early here avoids regressions like this:
  //  (v4i32 (sext (v4i1 (setcc (v4i16)))))
  // which would get combined into
  //  (v4i32 (sext_invec (v8i16 (concat (v4i16 (setcc (v4i16))), undef))))
  // and then type legalized to
  //  (v4i32 (sext_invec (v8i16 (trunc_invec (v4i32 (setcc (v4i32)))))))
  // leading to a packssdw+pmovsxwd.
41799 // We could write a DAG combine to fix this, but really we shouldn't be
41800 // creating sext_invec that's forcing v8i16 into the DAG.
41801 if (N0.getOpcode() == ISD::SETCC)
41804 // Input type must be a vector and we must be extending legal integer types.
41805 if (!VT.isVector() || VT.getVectorNumElements() < 2)
41807 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
41809 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
41812 // If the input/output types are both legal then we have at least AVX1 and
41813 // we will be able to use SIGN_EXTEND/ZERO_EXTEND directly.
41814 if (DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
      DAG.getTargetLoweringInfo().isTypeLegal(InVT))
    return SDValue();

  SDLoc DL(N);
41820 auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
41821 EVT SrcVT = N.getValueType();
41822 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
41823 Size / SrcVT.getScalarSizeInBits());
41824 SmallVector<SDValue, 8> Opnds(Size / SrcVT.getSizeInBits(),
                                  DAG.getUNDEF(SrcVT));
    Opnds[0] = N;
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Opnds);
  };
41830 // If target-size is less than 128-bits, extend to a type that would extend
41831 // to 128 bits, extend that and extract the original target vector.
41832 if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
    unsigned Scale = 128 / VT.getSizeInBits();
    EVT ExVT =
        EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
41836 SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
41837 SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
41838 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
41839 DAG.getIntPtrConstant(0, DL));
41842 // If target-size is 128-bits (or 256-bits on AVX target), then convert to
41843 // ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
41844 // Also use this if we don't have SSE41 to allow the legalizer do its job.
41845 if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
41846 (VT.is256BitVector() && Subtarget.hasAVX()) ||
41847 (VT.is512BitVector() && Subtarget.useAVX512Regs())) {
41848 SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
41849 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
41850 return DAG.getNode(Opcode, DL, VT, ExOp);
41853 auto SplitAndExtendInReg = [&](unsigned SplitSize) {
41854 unsigned NumVecs = VT.getSizeInBits() / SplitSize;
41855 unsigned NumSubElts = SplitSize / SVT.getSizeInBits();
41856 EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
41857 EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
41859 unsigned IROpc = getOpcode_EXTEND_VECTOR_INREG(Opcode);
41860 SmallVector<SDValue, 8> Opnds;
41861 for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
41862 SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
41863 DAG.getIntPtrConstant(Offset, DL));
41864 SrcVec = ExtendVecSize(DL, SrcVec, SplitSize);
41865 SrcVec = DAG.getNode(IROpc, DL, SubVT, SrcVec);
41866 Opnds.push_back(SrcVec);
41868 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
41871 // On pre-AVX targets, split into 128-bit nodes of
41872 // ISD::*_EXTEND_VECTOR_INREG.
41873 if (!Subtarget.hasAVX() && !(VT.getSizeInBits() % 128))
41874 return SplitAndExtendInReg(128);
41876 // On pre-AVX512 targets, split into 256-bit nodes of
41877 // ISD::*_EXTEND_VECTOR_INREG.
41878 if (!Subtarget.useAVX512Regs() && !(VT.getSizeInBits() % 256))
    return SplitAndExtendInReg(256);

  return SDValue();
}
41884 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
41886 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
41887 const X86Subtarget &Subtarget) {
41888 SDValue N0 = N->getOperand(0);
41889 EVT VT = N->getValueType(0);
41892 // Only do this combine with AVX512 for vector extends.
41893 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
41896 // Only combine legal element types.
41897 EVT SVT = VT.getVectorElementType();
41898 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
41899 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
  // We can only do this if the vector size is 256 bits or less.
  unsigned Size = VT.getSizeInBits();
  if (Size > 256)
    return SDValue();
41907 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
41908 // that's the only integer compares with we have.
41909 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
41910 if (ISD::isUnsignedIntSetCC(CC))
41913 // Only do this combine if the extension will be fully consumed by the setcc.
41914 EVT N00VT = N0.getOperand(0).getValueType();
41915 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
41916 if (Size != MatchingVecType.getSizeInBits())
41919 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
41921 if (N->getOpcode() == ISD::ZERO_EXTEND)
    Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());

  return Res;
}
41927 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
41928 TargetLowering::DAGCombinerInfo &DCI,
41929 const X86Subtarget &Subtarget) {
41930 SDValue N0 = N->getOperand(0);
41931 EVT VT = N->getValueType(0);
41932 EVT InVT = N0.getValueType();
41935 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
41938 if (!DCI.isBeforeLegalizeOps())
41941 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
41944 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
41945 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
41946 // Invert and sign-extend a boolean is the same as zero-extend and subtract
41947 // 1 because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
41948 // lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
41949 // sext (xor Bool, -1) --> sub (zext Bool), 1
41950 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
41951 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
41954 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  return SDValue();
}
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
  if (NegMul) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
41974 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
41975 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
41976 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
41977 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
41978 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
41979 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
41980 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
    case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
    }
  }

  if (NegAcc) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
41988 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
41989 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
41990 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
41991 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
41992 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
41993 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
41994 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
    case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
    }
  }

  return Opcode;
}
42002 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
42003 const X86Subtarget &Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
42007 // Let legalize expand this if it isn't a legal type yet.
42008 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
42011 EVT ScalarVT = VT.getScalarType();
42012 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
42015 SDValue A = N->getOperand(0);
42016 SDValue B = N->getOperand(1);
42017 SDValue C = N->getOperand(2);
42019 auto invertIfNegative = [&DAG](SDValue &V) {
42020 if (SDValue NegVal = isFNEG(DAG, V.getNode())) {
42021 V = DAG.getBitcast(V.getValueType(), NegVal);
42024 // Look through extract_vector_elts. If it comes from an FNEG, create a
42025 // new extract from the FNEG input.
42026 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
42027 isNullConstant(V.getOperand(1))) {
42028 if (SDValue NegVal = isFNEG(DAG, V.getOperand(0).getNode())) {
42029 NegVal = DAG.getBitcast(V.getOperand(0).getValueType(), NegVal);
42030 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
                        NegVal, V.getOperand(1));
        return true;
      }
    }

    return false;
  };
42039 // Do not convert the passthru input of scalar intrinsics.
42040 // FIXME: We could allow negations of the lower element only.
42041 bool NegA = invertIfNegative(A);
42042 bool NegB = invertIfNegative(B);
42043 bool NegC = invertIfNegative(C);
42045 if (!NegA && !NegB && !NegC)
42048 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
42050 if (N->getNumOperands() == 4)
42051 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, A, B, C);
}
42055 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
42056 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
42057 const X86Subtarget &Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue NegVal = isFNEG(DAG, N->getOperand(2).getNode());
  if (!NegVal)
    return SDValue();
42065 // FIXME: Should we bitcast instead?
42066 if (NegVal.getValueType() != VT)
42069 unsigned NewOpcode;
42070 switch (N->getOpcode()) {
42071 default: llvm_unreachable("Unexpected opcode!");
42072 case X86ISD::FMADDSUB: NewOpcode = X86ISD::FMSUBADD; break;
42073 case X86ISD::FMADDSUB_RND: NewOpcode = X86ISD::FMSUBADD_RND; break;
42074 case X86ISD::FMSUBADD: NewOpcode = X86ISD::FMADDSUB; break;
42075 case X86ISD::FMSUBADD_RND: NewOpcode = X86ISD::FMADDSUB_RND; break;
42078 if (N->getNumOperands() == 4)
42079 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
42080 NegVal, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                     NegVal);
}
42085 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
42086 TargetLowering::DAGCombinerInfo &DCI,
42087 const X86Subtarget &Subtarget) {
42088 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
42089 // (and (i32 x86isd::setcc_carry), 1)
42090 // This eliminates the zext. This transformation is necessary because
42091 // ISD::SETCC is always legalized to i8.
42093 SDValue N0 = N->getOperand(0);
42094 EVT VT = N->getValueType(0);
42096 if (N0.getOpcode() == ISD::AND &&
42098 N0.getOperand(0).hasOneUse()) {
42099 SDValue N00 = N0.getOperand(0);
42100 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
42101 if (!isOneConstant(N0.getOperand(1)))
42103 return DAG.getNode(ISD::AND, dl, VT,
42104 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
42105 N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
42113 SDValue N00 = N0.getOperand(0);
42114 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
42115 return DAG.getNode(ISD::AND, dl, VT,
42116 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
42117 N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;
42125 if (DCI.isBeforeLegalizeOps())
42126 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
42129 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;
42139 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
42142 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
42145 // TODO: Combine with any target/faux shuffle.
42146 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
42147 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
42148 SDValue N00 = N0.getOperand(0);
42149 SDValue N01 = N0.getOperand(1);
42150 unsigned NumSrcElts = N00.getValueType().getVectorNumElements();
42151 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
42152 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
42153 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
42154 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
      return concatSubVectors(N00, N01, VT, NumSrcElts * 2, DAG, dl, 128);
    }
  }

  return SDValue();
}
42162 /// Try to map a 128-bit or larger integer comparison to vector instructions
42163 /// before type legalization splits it up into chunks.
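/// For example (illustrative): an i256 equality produced by memcmp expansion
/// can become vpcmpeqb + vpmovmskb + a scalar compare against 0xFFFFFFFF on
/// AVX2, instead of several 64-bit compares chained together.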
42164 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
42165 const X86Subtarget &Subtarget) {
42166 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
42167 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
42169 // We're looking for an oversized integer equality comparison.
42170 SDValue X = SetCC->getOperand(0);
42171 SDValue Y = SetCC->getOperand(1);
42172 EVT OpVT = X.getValueType();
42173 unsigned OpSize = OpVT.getSizeInBits();
42174 if (!OpVT.isScalarInteger() || OpSize < 128)
42177 // Ignore a comparison with zero because that gets special treatment in
42178 // EmitTest(). But make an exception for the special case of a pair of
42179 // logically-combined vector-sized operands compared to zero. This pattern may
42180 // be generated by the memcmp expansion pass with oversized integer compares
42182 bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
42183 X.getOperand(0).getOpcode() == ISD::XOR &&
42184 X.getOperand(1).getOpcode() == ISD::XOR;
42185 if (isNullConstant(Y) && !IsOrXorXorCCZero)
42188 // Don't perform this combine if constructing the vector will be expensive.
42189 auto IsVectorBitCastCheap = [](SDValue X) {
42190 X = peekThroughBitcasts(X);
42191 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
           X.getOpcode() == ISD::LOAD;
  };
  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
      !IsOrXorXorCCZero)
    return SDValue();
42198 // TODO: Use PXOR + PTEST for SSE4.1 or later?
  EVT VT = SetCC->getValueType(0);
  SDLoc DL(SetCC);
42201 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
42202 (OpSize == 256 && Subtarget.hasAVX2()) ||
42203 (OpSize == 512 && Subtarget.useAVX512Regs())) {
42204 EVT VecVT = OpSize == 512 ? MVT::v16i32 :
                OpSize == 256 ? MVT::v32i8 :
                                MVT::v16i8;
    EVT CmpVT = OpSize == 512 ? MVT::v16i1 : VecVT;
    SDValue Cmp;
42209 if (IsOrXorXorCCZero) {
42210 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
42211 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
42212 // Use 2 vector equality compares and 'and' the results before doing a
42214 SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
42215 SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
42216 SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
42217 SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
42218 SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
42219 SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
42220 Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
42222 SDValue VecX = DAG.getBitcast(VecVT, X);
42223 SDValue VecY = DAG.getBitcast(VecVT, Y);
      Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
    }
    // For 512-bits we want to emit a setcc that will lower to kortest.
    if (OpSize == 512)
      return DAG.getSetCC(DL, VT, DAG.getBitcast(MVT::i16, Cmp),
42229 DAG.getConstant(0xFFFF, DL, MVT::i16), CC);
42230 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
42231 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
42232 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
42233 // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
42234 // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
42235 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
    SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
                                    MVT::i32);
    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
  }

  return SDValue();
}
42244 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
42245 const X86Subtarget &Subtarget) {
42246 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
42247 SDValue LHS = N->getOperand(0);
42248 SDValue RHS = N->getOperand(1);
42249 EVT VT = N->getValueType(0);
  EVT OpVT = LHS.getValueType();
  SDLoc DL(N);
42253 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
42254 // 0-x == y --> x+y == 0
42255 // 0-x != y --> x+y != 0
42256 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
42258 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
42259 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
42261 // x == 0-y --> x+y == 0
42262 // x != 0-y --> x+y != 0
42263 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
42265 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }

    if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
      return V;
  }
42273 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42274 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
42275 // Put build_vectors on the right.
42276 if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
42277 std::swap(LHS, RHS);
42278 CC = ISD::getSetCCSwappedOperands(CC);
42282 (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
42283 (LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
42284 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
42286 if (IsSEXT0 && IsVZero1) {
42287 assert(VT == LHS.getOperand(0).getValueType() &&
42288 "Uexpected operand type");
42289 if (CC == ISD::SETGT)
42290 return DAG.getConstant(0, DL, VT);
42291 if (CC == ISD::SETLE)
42292 return DAG.getConstant(1, DL, VT);
42293 if (CC == ISD::SETEQ || CC == ISD::SETGE)
42294 return DAG.getNOT(DL, LHS.getOperand(0), VT);
42296 assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
42297 "Unexpected condition code!");
      return LHS.getOperand(0);
    }
  }
42302 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
42303 // pre-promote its result type since vXi1 vectors don't get promoted
42304 // during type legalization.
42305 // NOTE: The element count check is to ignore operand types that need to
42306 // go through type promotion to a 128-bit vector.
42307 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
42308 VT.getVectorElementType() == MVT::i1 &&
42309 (ExperimentalVectorWideningLegalization ||
42310 VT.getVectorNumElements() > 4) &&
42311 (OpVT.getVectorElementType() == MVT::i8 ||
42312 OpVT.getVectorElementType() == MVT::i16)) {
    SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
                                N->getOperand(2));
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
  }
42318 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
42319 // to avoid scalarization via legalization because v4i32 is not a legal type.
42320 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
42321 LHS.getValueType() == MVT::v4f32)
    return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);

  return SDValue();
}
42327 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
42328 TargetLowering::DAGCombinerInfo &DCI) {
42329 SDValue Src = N->getOperand(0);
42330 MVT SrcVT = Src.getSimpleValueType();
42331 MVT VT = N->getSimpleValueType(0);
42332 unsigned NumBits = VT.getScalarSizeInBits();
42333 unsigned NumElts = SrcVT.getVectorNumElements();
42335 // Perform constant folding.
42336 if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
    assert(VT == MVT::i32 && "Unexpected result type");
    APInt Imm(32, 0);
42339 for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
42340 if (!Src.getOperand(Idx).isUndef() &&
          Src.getConstantOperandAPInt(Idx).isNegative())
        Imm.setBit(Idx);
    }
    return DAG.getConstant(Imm, SDLoc(N), VT);
  }
42347 // Look through int->fp bitcasts that don't change the element width.
42348 unsigned EltWidth = SrcVT.getScalarSizeInBits();
42349 if (Src.getOpcode() == ISD::BITCAST &&
42350 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
42351 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
42353 // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
42354 // with scalar comparisons.
42355 if (SDValue NotSrc = IsNOT(Src, DAG)) {
42357 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
42358 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
42359 return DAG.getNode(ISD::XOR, DL, VT,
42360 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
42361 DAG.getConstant(NotMask, DL, VT));
42364 // Simplify the inputs.
42365 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42366 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
42367 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}
42373 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
42374 TargetLowering::DAGCombinerInfo &DCI,
42375 const X86Subtarget &Subtarget) {
  SDLoc DL(N);

  if (DCI.isBeforeLegalizeOps()) {
42379 SDValue Index = N->getOperand(4);
42380 // Remove any sign extends from 32 or smaller to larger than 32.
    // Only do this before LegalizeOps in case we need the sign extend for
    // legalization.
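    //
    // For example (illustrative): a gather index of the form
    // (v4i64 sign_extend (v4i32 X)) can simply use X, since a 32-bit index is
    // sign-extended by the gather addressing itself.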
42383 if (Index.getOpcode() == ISD::SIGN_EXTEND) {
42384 if (Index.getScalarValueSizeInBits() > 32 &&
42385 Index.getOperand(0).getScalarValueSizeInBits() <= 32) {
42386 SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
42387 NewOps[4] = Index.getOperand(0);
42388 SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
42390 // The original sign extend has less users, add back to worklist in
42391 // case it needs to be removed
42392 DCI.AddToWorklist(Index.getNode());
42393 DCI.AddToWorklist(N);
42395 return SDValue(Res, 0);
42399 // Make sure the index is either i32 or i64
42400 unsigned ScalarSize = Index.getScalarValueSizeInBits();
42401 if (ScalarSize != 32 && ScalarSize != 64) {
42402 MVT EltVT = ScalarSize > 32 ? MVT::i64 : MVT::i32;
42403 EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
42404 Index.getValueType().getVectorNumElements());
42405 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
      SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
      NewOps[4] = Index;
      SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
      if (Res == N)
        DCI.AddToWorklist(N);
      return SDValue(Res, 0);
    }
42414 // Try to remove zero extends from 32->64 if we know the sign bit of
42415 // the input is zero.
42416 if (Index.getOpcode() == ISD::ZERO_EXTEND &&
42417 Index.getScalarValueSizeInBits() == 64 &&
42418 Index.getOperand(0).getScalarValueSizeInBits() == 32) {
42419 if (DAG.SignBitIsZero(Index.getOperand(0))) {
42420 SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
42421 NewOps[4] = Index.getOperand(0);
42422 SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
42424 // The original sign extend has less users, add back to worklist in
42425 // case it needs to be removed
42426 DCI.AddToWorklist(Index.getNode());
42427 DCI.AddToWorklist(N);
42429 return SDValue(Res, 0);
42434 // With AVX2 we only demand the upper bit of the mask.
42435 if (!Subtarget.hasAVX512()) {
42436 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42437 SDValue Mask = N->getOperand(2);
42438 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
42439 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  return SDValue();
}
42446 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
42447 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
42448 const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
42451 SDValue EFLAGS = N->getOperand(1);
42453 // Try to simplify the EFLAGS and condition code operands.
42454 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}
42460 /// Optimize branch condition evaluation.
42461 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
42462 const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue EFLAGS = N->getOperand(3);
42465 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
42467 // Try to simplify the EFLAGS and condition code operands.
42468 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
42469 // RAUW them under us.
42470 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
42471 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
42472 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}
42479 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
42480 SelectionDAG &DAG) {
42481 // Take advantage of vector comparisons producing 0 or -1 in each lane to
42482 // optimize away operation when it's from a constant.
42484 // The general transformation is:
42485 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
42486 // AND(VECTOR_CMP(x,y), constant2)
42487 // constant2 = UNARYOP(constant)
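  //
  // For example (illustrative), with a v4i32 splat-of-1 mask constant:
  //   (sitofp (and (setcc X, Y), <1,1,1,1>))
  //     --> (bitcast (and (setcc X, Y), (bitcast (sitofp <1,1,1,1>))))
  // since each lane of the AND is either 0 or the constant, and converting 0
  // yields an all-zero bit pattern.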
42489 // Early exit if this isn't a vector operation, the operand of the
42490 // unary operation isn't a bitwise AND, or if the sizes of the operations
42491 // aren't the same.
42492 EVT VT = N->getValueType(0);
42493 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
42494 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
42495 VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
42498 // Now check that the other operand of the AND is a constant. We could
42499 // make the transformation for non-constant splats as well, but it's unclear
42500 // that would be a benefit as it would not eliminate any operations, just
42501 // perform one more step in scalar code before moving to the vector unit.
42502 if (auto *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(0).getOperand(1))) {
42503 // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
42510 // Create a new constant of the appropriate type for the transformed
42512 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
42513 // The AND node needs bitcasts to/from an integer vector type around it.
42514 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
42515 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
42516 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getBitcast(VT, NewAnd);
    return Res;
  }

  return SDValue();
}
42524 /// If we are converting a value to floating-point, try to replace scalar
42525 /// truncate of an extracted vector element with a bitcast. This tries to keep
42526 /// the sequence on XMM registers rather than moving between vector and GPRs.
42527 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
42528 // TODO: This is currently only used by combineSIntToFP, but it is generalized
42529 // to allow being called by any similar cast opcode.
42530 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
42531 SDValue Trunc = N->getOperand(0);
42532 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
42535 SDValue ExtElt = Trunc.getOperand(0);
42536 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42537 !isNullConstant(ExtElt.getOperand(1)))
42540 EVT TruncVT = Trunc.getValueType();
42541 EVT SrcVT = ExtElt.getValueType();
42542 unsigned DestWidth = TruncVT.getSizeInBits();
42543 unsigned SrcWidth = SrcVT.getSizeInBits();
42544 if (SrcWidth % DestWidth != 0)
42547 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
42548 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
42549 unsigned VecWidth = SrcVecVT.getSizeInBits();
42550 unsigned NumElts = VecWidth / DestWidth;
42551 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
42552 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
42554 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
42555 BitcastVec, ExtElt.getOperand(1));
  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
}
42559 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
42560 const X86Subtarget &Subtarget) {
42561 SDValue Op0 = N->getOperand(0);
42562 EVT VT = N->getValueType(0);
42563 EVT InVT = Op0.getValueType();
42565 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
42566 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
42567 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
42568 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
42570 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
42571 InVT.getVectorNumElements());
42572 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
42574 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
42575 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
42578 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
42579 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
42580 // the optimization here.
42581 if (DAG.SignBitIsZero(Op0))
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);

  return SDValue();
}
42587 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
42588 const X86Subtarget &Subtarget) {
42589 // First try to optimize away the conversion entirely when it's
42590 // conditionally from a constant. Vectors only.
42591 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
42594 // Now move on to more general possibilities.
42595 SDValue Op0 = N->getOperand(0);
42596 EVT VT = N->getValueType(0);
42597 EVT InVT = Op0.getValueType();
42599 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
42600 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
42601 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
42602 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
42604 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
42605 InVT.getVectorNumElements());
42606 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
42607 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
42610 // Without AVX512DQ we only support i64 to float scalar conversion. For both
42611 // vectors and scalars, see if we know that the upper bits are all the sign
42612 // bit, in which case we can truncate the input to i32 and convert from that.
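  //
  // For example (illustrative): (f64 sint_to_fp (i64 X)) where X is known to
  // have at least 33 sign bits becomes (f64 sint_to_fp (i32 trunc X)), which
  // cvtsi2sd handles directly without DQI.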
42613 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
42614 unsigned BitWidth = InVT.getScalarSizeInBits();
42615 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
42616 if (NumSignBits >= (BitWidth - 31)) {
42617 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), 32);
42618 if (InVT.isVector())
42619 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
42620 InVT.getVectorNumElements());
42622 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
42623 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
42627 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
42628 // a 32-bit target where SSE doesn't support i64->FP operations.
42629 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
42630 Op0.getOpcode() == ISD::LOAD) {
42631 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
42632 EVT LdVT = Ld->getValueType(0);
42634 // This transformation is not supported if the result type is f16 or f128.
42635 if (VT == MVT::f16 || VT == MVT::f128)
42638 // If we have AVX512DQ we can use packed conversion instructions unless
42640 if (Subtarget.hasDQI() && VT != MVT::f80)
42643 if (!Ld->isVolatile() && !VT.isVector() &&
42644 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
42645 !Subtarget.is64Bit() && LdVT == MVT::i64) {
42646 SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
42647 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }

  if (SDValue V = combineToFPTruncExtElt(N, DAG))
    return V;

  return SDValue();
}
42659 static bool needCarryOrOverflowFlag(SDValue Flags) {
42660 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;

    X86::CondCode CC;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return true;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CC = (X86::CondCode)User->getConstantOperandVal(0);
      break;
    case X86ISD::BRCOND:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    case X86ISD::CMOV:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    }

    switch (CC) {
    default: break;
42685 case X86::COND_A: case X86::COND_AE:
42686 case X86::COND_B: case X86::COND_BE:
42687 case X86::COND_O: case X86::COND_NO:
42688 case X86::COND_G: case X86::COND_GE:
    case X86::COND_L: case X86::COND_LE:
      return true;
    }
  }

  return false;
}
42697 static bool onlyZeroFlagUsed(SDValue Flags) {
42698 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;

    unsigned CCOpNo;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return false;
42709 case X86ISD::SETCC: CCOpNo = 0; break;
42710 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
42711 case X86ISD::BRCOND: CCOpNo = 2; break;
    case X86ISD::CMOV: CCOpNo = 2; break;
    }

    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
    if (CC != X86::COND_E && CC != X86::COND_NE)
      return false;
  }

  return true;
}
42723 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
42724 // Only handle test patterns.
42725 if (!isNullConstant(N->getOperand(1)))
42728 // If we have a CMP of a truncated binop, see if we can make a smaller binop
42729 // and use its flags directly.
42730 // TODO: Maybe we should try promoting compares that only use the zero flag
42731 // first if we can prove the upper bits with computeKnownBits?
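  //
  // For example (illustrative):
  //   (cmp (i8 trunc (i32 or A, B)), 0)
  //     --> narrow to an i8 X86ISD::OR of (trunc A), (trunc B) and use its
  //         EFLAGS result directly, so no separate cmp/test is needed.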
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);
42734 EVT VT = Op.getValueType();
42736 // If we have a constant logical shift that's only used in a comparison
42737 // against zero turn it into an equivalent AND. This allows turning it into
42738 // a TEST instruction later.
42739 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
42740 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
42741 onlyZeroFlagUsed(SDValue(N, 0))) {
42742 unsigned BitWidth = VT.getSizeInBits();
42743 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
42744 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
42745 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
42746 APInt Mask = Op.getOpcode() == ISD::SRL
42747 ? APInt::getHighBitsSet(BitWidth, MaskBits)
42748 : APInt::getLowBitsSet(BitWidth, MaskBits);
42749 if (Mask.isSignedIntN(32)) {
42750 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
42751 DAG.getConstant(Mask, dl, VT));
42752 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                           DAG.getConstant(0, dl, VT));
      }
    }
  }
42758 // Look for a truncate with a single use.
42759 if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
42762 Op = Op.getOperand(0);
42764 // Arithmetic op can only have one use.
  if (!Op.hasOneUse())
    return SDValue();

  unsigned NewOpc;
42769 switch (Op.getOpcode()) {
  default: return SDValue();
  case ISD::AND:
42772 // Skip and with constant. We have special handling for and with immediate
42773 // during isel to generate test instructions.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    NewOpc = X86ISD::AND;
    break;
42778 case ISD::OR: NewOpc = X86ISD::OR; break;
42779 case ISD::XOR: NewOpc = X86ISD::XOR; break;
42781 // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::ADD;
    break;
  case ISD::SUB:
42787 // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::SUB;
    break;
  }
42794 // We found an op we can narrow. Truncate its inputs.
42795 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
42796 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
42798 // Use a X86 specific opcode to avoid DAG combine messing with it.
42799 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42800 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
42802 // For AND, keep a CMP so that we can match the test pattern.
42803 if (NewOpc == X86ISD::AND)
42804 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
42805 DAG.getConstant(0, dl, VT));
42807 // Return the flags.
  return Op.getValue(1);
}
42811 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
42812 TargetLowering::DAGCombinerInfo &DCI) {
42813 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
42814 "Expected X86ISD::ADD or X86ISD::SUB");
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
42818 SDValue RHS = N->getOperand(1);
42819 MVT VT = LHS.getSimpleValueType();
42820 unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
42822 // If we don't use the flag result, simplify back to a generic ADD/SUB.
42823 if (!N->hasAnyUseOfValue(1)) {
42824 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
42825 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
42828 // Fold any similar generic ADD/SUB opcodes to reuse this node.
42829 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
42830 SDValue Ops[] = {N0, N1};
42831 SDVTList VTs = DAG.getVTList(N->getValueType(0));
    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
      SDValue Op(N, 0);
      if (Negate)
        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
      DCI.CombineTo(GenericAddSub, Op);
    }
  };
42839 MatchGeneric(LHS, RHS, false);
  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());

  return SDValue(N, 0);
}
42845 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
42846 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
42847 MVT VT = N->getSimpleValueType(0);
42848 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42849 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
42850 N->getOperand(0), N->getOperand(1),
42854 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
42855 // iff the flag result is dead.
42856 SDValue Op0 = N->getOperand(0);
42857 SDValue Op1 = N->getOperand(1);
42858 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
42859 !N->hasAnyUseOfValue(1))
42860 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
                       Op0.getOperand(1), N->getOperand(2));

  return SDValue();
}
42866 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
42867 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
42868 TargetLowering::DAGCombinerInfo &DCI) {
42869 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
42870 // the result is either zero or one (depending on the input carry bit).
42871 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
42872 if (X86::isZeroNode(N->getOperand(0)) &&
42873 X86::isZeroNode(N->getOperand(1)) &&
42874 // We don't have a good way to replace an EFLAGS use, so only do this when
42876 SDValue(N, 1).use_empty()) {
42878 EVT VT = N->getValueType(0);
42879 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
42880 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
42881 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B, DL,
                                                           MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, DL, VT));
42886 return DCI.CombineTo(N, Res1, CarryOut);
42889 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
42890 MVT VT = N->getSimpleValueType(0);
42891 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
42892 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  return SDValue();
}
42900 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
42901 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
42902 /// with CMP+{ADC, SBB}.
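/// For example (illustrative):
///   (add X, (setb Z))  -->  (adc X, 0, EFLAGS(Z))
///   (sub X, (setb Z))  -->  (sbb X, 0, EFLAGS(Z))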
42903 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
42904 bool IsSub = N->getOpcode() == ISD::SUB;
42905 SDValue X = N->getOperand(0);
42906 SDValue Y = N->getOperand(1);
42908 // If this is an add, canonicalize a zext operand to the RHS.
42909 // TODO: Incomplete? What if both sides are zexts?
42910 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
42911 Y.getOpcode() != ISD::ZERO_EXTEND)
42914 // Look through a one-use zext.
42915 bool PeekedThroughZext = false;
42916 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
42917 Y = Y.getOperand(0);
42918 PeekedThroughZext = true;
42921 // If this is an add, canonicalize a setcc operand to the RHS.
42922 // TODO: Incomplete? What if both sides are setcc?
42923 // TODO: Should we allow peeking through a zext of the other operand?
42924 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
42925 Y.getOpcode() != X86ISD::SETCC)
42928 if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
42932 EVT VT = N->getValueType(0);
42933 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
42935 // If X is -1 or 0, then we have an opportunity to avoid constants required in
42936 // the general case below.
42937 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
42939 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
42940 (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
42941 // This is a complicated way to get -1 or 0 from the carry flag:
42942 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
42943 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
42944 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42945 DAG.getConstant(X86::COND_B, DL, MVT::i8),
42949 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
42950 (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
42951 SDValue EFLAGS = Y->getOperand(1);
42952 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
42953 EFLAGS.getValueType().isInteger() &&
42954 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
42955 // Swap the operands of a SUB, and we have the same pattern as above.
42956 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
42957 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
42958 SDValue NewSub = DAG.getNode(
42959 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
42960 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
42961 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
42962 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
42963 DAG.getConstant(X86::COND_B, DL, MVT::i8),
42969 if (CC == X86::COND_B) {
42970 // X + SETB Z --> adc X, 0
42971 // X - SETB Z --> sbb X, 0
42972 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
42973 DAG.getVTList(VT, MVT::i32), X,
42974 DAG.getConstant(0, DL, VT), Y.getOperand(1));
42977 if (CC == X86::COND_A) {
42978 SDValue EFLAGS = Y->getOperand(1);
42979 // Try to convert COND_A into COND_B in an attempt to facilitate
42980 // materializing "setb reg".
42982 // Do not flip "e > c", where "c" is a constant, because the CMP instruction
42983 // cannot take an immediate as its first operand.
42985 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
42986 EFLAGS.getValueType().isInteger() &&
42987 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
42988 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
42989 EFLAGS.getNode()->getVTList(),
42990 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
42991 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
42992 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
42993 DAG.getVTList(VT, MVT::i32), X,
42994 DAG.getConstant(0, DL, VT), NewEFLAGS);
42998 if (CC != X86::COND_E && CC != X86::COND_NE)
43001 SDValue Cmp = Y.getOperand(1);
43002 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
43003 !X86::isZeroNode(Cmp.getOperand(1)) ||
43004 !Cmp.getOperand(0).getValueType().isInteger())
43007 SDValue Z = Cmp.getOperand(0);
43008 EVT ZVT = Z.getValueType();
43010 // If X is -1 or 0, then we have an opportunity to avoid constants required in
43011 // the general case below.
43013 // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
43015 // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
43016 // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
43017 if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
43018 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
43019 SDValue Zero = DAG.getConstant(0, DL, ZVT);
43020 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
43021 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
43022 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43023 DAG.getConstant(X86::COND_B, DL, MVT::i8),
43024 SDValue(Neg.getNode(), 1));
43027 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
43028 // with fake operands:
43029 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
43030 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
43031 if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
43032 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
43033 SDValue One = DAG.getConstant(1, DL, ZVT);
43034 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
43035 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43036 DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp1);
43040 // (cmp Z, 1) sets the carry flag if Z is 0.
43041 SDValue One = DAG.getConstant(1, DL, ZVT);
43042 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
43044 // Add the flags type for ADC/SBB nodes.
43045 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43047 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
43048 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
43049 if (CC == X86::COND_NE)
43050 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
43051 DAG.getConstant(-1ULL, DL, VT), Cmp1);
43053 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
43054 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
43055 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
43056 DAG.getConstant(0, DL, VT), Cmp1);
43059 static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
43060 const X86Subtarget &Subtarget) {
43061 if (!Subtarget.hasSSE2())
43064 SDValue Op0 = N->getOperand(0);
43065 SDValue Op1 = N->getOperand(1);
43067 EVT VT = N->getValueType(0);
43069 // If the vector size is less than 128 bits, or greater than the supported RegSize,
43070 // do not use PMADD.
43071 if (!VT.isVector() || VT.getVectorNumElements() < 8)
43074 if (Op0.getOpcode() != ISD::MUL)
43075 std::swap(Op0, Op1);
43076 if (Op0.getOpcode() != ISD::MUL)
43080 if (!canReduceVMulWidth(Op0.getNode(), DAG, Mode) || Mode == MULU16)
43084 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43085 VT.getVectorNumElements());
43086 EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43087 VT.getVectorNumElements() / 2);
43089 // Madd vector size is half of the original vector size
43090 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43091 ArrayRef<SDValue> Ops) {
43092 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
43093 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
43096 auto BuildPMADDWD = [&](SDValue Mul) {
43097 // Shrink the operands of mul.
43098 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(0));
43099 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(1));
43101 SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
43103 // Fill the rest of the output with 0
43104 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd,
43105 DAG.getConstant(0, DL, MAddVT));
43108 Op0 = BuildPMADDWD(Op0);
43110 // It's possible that Op1 is also a mul we can reduce.
43111 if (Op1.getOpcode() == ISD::MUL &&
43112 canReduceVMulWidth(Op1.getNode(), DAG, Mode) && Mode != MULU16) {
43113 Op1 = BuildPMADDWD(Op1);
43116 return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
43119 static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
43120 const X86Subtarget &Subtarget) {
43121 if (!Subtarget.hasSSE2())
43125 EVT VT = N->getValueType(0);
43126 SDValue Op0 = N->getOperand(0);
43127 SDValue Op1 = N->getOperand(1);
43129 // TODO: There's nothing special about i32, any integer type above i16 should
43130 // work just as well.
43131 if (!VT.isVector() || !VT.isSimple() ||
43132 !(VT.getVectorElementType() == MVT::i32))
43135 unsigned RegSize = 128;
43136 if (Subtarget.useBWIRegs())
43138 else if (Subtarget.hasAVX())
43141 // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
43142 // TODO: We should be able to handle larger vectors by splitting them before
43143 // feeding them into several SADs, and then reducing over those.
43144 if (VT.getSizeInBits() / 4 > RegSize)
43147 // We know N is a reduction add, which means one of its operands is a phi.
43148 // To match SAD, we need the other operand to be an ABS.
43149 if (Op0.getOpcode() != ISD::ABS)
43150 std::swap(Op0, Op1);
43151 if (Op0.getOpcode() != ISD::ABS)
43154 auto BuildPSADBW = [&](SDValue Op0, SDValue Op1) {
43155 // SAD pattern detected. Now build a SAD instruction and an addition for
43156 // reduction. Note that the number of elements of the result of SAD is less
43157 // than the number of elements of its input. Therefore, we can only update
43158 // part of the elements in the reduction vector.
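// For example (illustrative): a v16i8 psadbw produces a v2i64 holding two
// partial sums of absolute differences (one per 8-byte half), so only the
// low lanes of the wider reduction vector carry meaningful data.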
43159 SDValue Sad = createPSADBW(DAG, Op0, Op1, DL, Subtarget);
43161 // The output of PSADBW is a vector of i64.
43162 // We need to turn the vector of i64 into a vector of i32.
43163 // If the reduction vector is at least as wide as the psadbw result, just
43164 // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
43166 MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
43167 if (VT.getSizeInBits() >= ResVT.getSizeInBits())
43168 Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
43170 Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
43172 if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
43173 // Fill the upper elements with zero to match the add width.
43174 SDValue Zero = DAG.getConstant(0, DL, VT);
43175 Sad = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Zero, Sad,
43176 DAG.getIntPtrConstant(0, DL));
43182 // Check whether we have an abs-diff pattern feeding into the select.
43183 SDValue SadOp0, SadOp1;
43184 if (!detectZextAbsDiff(Op0, SadOp0, SadOp1))
43187 Op0 = BuildPSADBW(SadOp0, SadOp1);
43189 // It's possible we have a sad on the other side too.
43190 if (Op1.getOpcode() == ISD::ABS &&
43191 detectZextAbsDiff(Op1, SadOp0, SadOp1)) {
43192 Op1 = BuildPSADBW(SadOp0, SadOp1);
43195 return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
43198 /// Convert vector increment or decrement to sub/add with an all-ones constant:
43199 /// add X, <1, 1...> --> sub X, <-1, -1...>
43200 /// sub X, <1, 1...> --> add X, <-1, -1...>
43201 /// The all-ones vector constant can be materialized using a pcmpeq instruction
43202 /// that is commonly recognized as an idiom (has no register dependency), so
43203 /// that's better/smaller than loading a splat 1 constant.
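///
/// A rough sketch of the expected v4i32 codegen (illustrative only):
///   pcmpeqd %xmm1, %xmm1   # materialize <-1,-1,-1,-1> without a load
///   psubd   %xmm1, %xmm0   # x - (-1) == x + 1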
43204 static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) {
43205 assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
43206 "Unexpected opcode for increment/decrement transform");
43208 // Pseudo-legality check: getOnesVector() expects one of these types, so bail
43209 // out and wait for legalization if we have an unsupported vector length.
43210 EVT VT = N->getValueType(0);
43211 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
43215 if (!isConstantSplat(N->getOperand(1), SplatVal) || !SplatVal.isOneValue())
43218 SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N));
43219 unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
43220 return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec);
43223 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
43224 const SDLoc &DL, EVT VT,
43225 const X86Subtarget &Subtarget) {
43226 // Example of the pattern we try to detect:
43227 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
43228 //(add (build_vector (extract_elt t, 0),
43229 // (extract_elt t, 2),
43230 // (extract_elt t, 4),
43231 // (extract_elt t, 6)),
43232 // (build_vector (extract_elt t, 1),
43233 // (extract_elt t, 3),
43234 // (extract_elt t, 5),
43235 // (extract_elt t, 7)))
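//
// When the multiply can be narrowed to vXi16 operands, each output lane is
// t[2i] + t[2i+1], which is exactly what (v)pmaddwd computes, so the whole
// pattern can collapse to a single X86ISD::VPMADDWD on the truncated inputs.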
43237 if (!Subtarget.hasSSE2())
43240 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
43241 Op1.getOpcode() != ISD::BUILD_VECTOR)
43244 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43245 VT.getVectorNumElements() < 4 ||
43246 !isPowerOf2_32(VT.getVectorNumElements()))
43249 // Check if one of Op0,Op1 is of the form:
43250 // (build_vector (extract_elt Mul, 0),
43251 // (extract_elt Mul, 2),
43252 // (extract_elt Mul, 4),
43254 // the other is of the form:
43255 // (build_vector (extract_elt Mul, 1),
43256 // (extract_elt Mul, 3),
43257 // (extract_elt Mul, 5),
43259 // and identify Mul.
43261 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
43262 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
43263 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
43264 // TODO: Be more tolerant to undefs.
43265 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43266 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43267 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43268 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43270 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
43271 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
43272 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
43273 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
43274 if (!Const0L || !Const1L || !Const0H || !Const1H)
43276 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
43277 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
43278 // Commutativity of mul allows factors of a product to reorder.
43280 std::swap(Idx0L, Idx1L);
43282 std::swap(Idx0H, Idx1H);
43283 // Commutativity of add allows pairs of factors to reorder.
43284 if (Idx0L > Idx0H) {
43285 std::swap(Idx0L, Idx0H);
43286 std::swap(Idx1L, Idx1H);
43288 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
43289 Idx1H != 2 * i + 3)
43292 // First time an extract_elt's source vector is visited. Must be a MUL
43293 // with 2X the number of vector elements of the BUILD_VECTOR.
43294 // Both extracts must be from the same MUL.
43295 Mul = Op0L->getOperand(0);
43296 if (Mul->getOpcode() != ISD::MUL ||
43297 Mul.getValueType().getVectorNumElements() != 2 * e)
43300 // Check that the extract is from the same MUL previously seen.
43301 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
43302 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
43306 // Check if the Mul source can be safely shrunk.
43308 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
43311 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43312 ArrayRef<SDValue> Ops) {
43313 // Shrink by adding truncate nodes and let DAGCombine fold with the
43315 EVT InVT = Ops[0].getValueType();
43316 assert(InVT.getScalarType() == MVT::i32 &&
43317 "Unexpected scalar element type");
43318 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
43319 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43320 InVT.getVectorNumElements() / 2);
43321 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43322 InVT.getVectorNumElements());
43323 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
43324 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
43325 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
43327 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
43328 { Mul.getOperand(0), Mul.getOperand(1) },
43332 // Attempt to turn this pattern into PMADDWD.
43333 // (add (mul (sext (build_vector)), (sext (build_vector))),
43334 //      (mul (sext (build_vector)), (sext (build_vector))))
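//
// Concretely (a sketch): for a v4i32 result, lane i must compute
//   a[2*i] * b[2*i] + a[2*i+1] * b[2*i+1]
// over vXi16 inputs a and b, which is the definition of (v)pmaddwd.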
43335 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
43336 const SDLoc &DL, EVT VT,
43337 const X86Subtarget &Subtarget) {
43338 if (!Subtarget.hasSSE2())
43341 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
43344 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43345 VT.getVectorNumElements() < 4 ||
43346 !isPowerOf2_32(VT.getVectorNumElements()))
43349 SDValue N00 = N0.getOperand(0);
43350 SDValue N01 = N0.getOperand(1);
43351 SDValue N10 = N1.getOperand(0);
43352 SDValue N11 = N1.getOperand(1);
43354 // All inputs need to be sign extends.
43355 // TODO: Support ZERO_EXTEND from known positive?
43356 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
43357 N01.getOpcode() != ISD::SIGN_EXTEND ||
43358 N10.getOpcode() != ISD::SIGN_EXTEND ||
43359 N11.getOpcode() != ISD::SIGN_EXTEND)
43362 // Peek through the extends.
43363 N00 = N00.getOperand(0);
43364 N01 = N01.getOperand(0);
43365 N10 = N10.getOperand(0);
43366 N11 = N11.getOperand(0);
43368 // Must be extending from vXi16.
43369 EVT InVT = N00.getValueType();
43370 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
43371 N10.getValueType() != InVT || N11.getValueType() != InVT)
43374 // All inputs should be build_vectors.
43375 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
43376 N01.getOpcode() != ISD::BUILD_VECTOR ||
43377 N10.getOpcode() != ISD::BUILD_VECTOR ||
43378 N11.getOpcode() != ISD::BUILD_VECTOR)
43381 // For each result element, we need the even element of one input vector to be
43382 // multiplied by the even element of the other vector, and the odd element of
43383 // one input vector to be multiplied by the odd element of the other vector.
43384 // In other words, for each element i the following computation must be
43385 // performed:
43386 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
43388 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
43389 SDValue N00Elt = N00.getOperand(i);
43390 SDValue N01Elt = N01.getOperand(i);
43391 SDValue N10Elt = N10.getOperand(i);
43392 SDValue N11Elt = N11.getOperand(i);
43393 // TODO: Be more tolerant to undefs.
43394 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43395 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43396 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43397 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43399 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
43400 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
43401 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
43402 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
43403 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
43405 unsigned IdxN00 = ConstN00Elt->getZExtValue();
43406 unsigned IdxN01 = ConstN01Elt->getZExtValue();
43407 unsigned IdxN10 = ConstN10Elt->getZExtValue();
43408 unsigned IdxN11 = ConstN11Elt->getZExtValue();
43409 // Add is commutative so indices can be reordered.
43410 if (IdxN00 > IdxN10) {
43411 std::swap(IdxN00, IdxN10);
43412 std::swap(IdxN01, IdxN11);
43414 // N0 indices must be the even elements. N1 indices must be the next odd elements.
43415 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
43416 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
43418 SDValue N00In = N00Elt.getOperand(0);
43419 SDValue N01In = N01Elt.getOperand(0);
43420 SDValue N10In = N10Elt.getOperand(0);
43421 SDValue N11In = N11Elt.getOperand(0);
43422 // The first time we find an input, capture it.
43427 // Mul is commutative so the input vectors can be in any order.
43428 // Canonicalize to make the compares easier.
43430 std::swap(N00In, N01In);
43432 std::swap(N10In, N11In);
43433 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
43437 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43438 ArrayRef<SDValue> Ops) {
43439 // Shrink by adding truncate nodes and let DAGCombine fold with the
43441 EVT OpVT = Ops[0].getValueType();
43442 assert(OpVT.getScalarType() == MVT::i16 &&
43443 "Unexpected scalar element type");
43444 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
43445 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43446 OpVT.getVectorNumElements() / 2);
43447 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
43449 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
43453 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
43454 const X86Subtarget &Subtarget) {
43455 const SDNodeFlags Flags = N->getFlags();
43456 if (Flags.hasVectorReduction()) {
43457 if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
43459 if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
43462 EVT VT = N->getValueType(0);
43463 SDValue Op0 = N->getOperand(0);
43464 SDValue Op1 = N->getOperand(1);
43466 if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43468 if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43471 // Try to synthesize horizontal adds from adds of shuffles.
43472 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
43473 VT == MVT::v8i32) &&
43474 Subtarget.hasSSSE3() &&
43475 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
43476 auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43477 ArrayRef<SDValue> Ops) {
43478 return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
43480 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
43484 if (SDValue V = combineIncDecVector(N, DAG))
43487 return combineAddOrSubToADCOrSBB(N, DAG);
43490 static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
43491 const X86Subtarget &Subtarget) {
43492 SDValue Op0 = N->getOperand(0);
43493 SDValue Op1 = N->getOperand(1);
43494 EVT VT = N->getValueType(0);
43496 // PSUBUS is supported, starting from SSE2, but truncation for v8i32
43497 // is only worth it with SSSE3 (PSHUFB).
43498 if (!(Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) &&
43499 !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
43500 !(Subtarget.hasAVX() && (VT == MVT::v32i8 || VT == MVT::v16i16)) &&
43501 !(Subtarget.useBWIRegs() && (VT == MVT::v64i8 || VT == MVT::v32i16 ||
43502 VT == MVT::v16i32 || VT == MVT::v8i64)))
43505 SDValue SubusLHS, SubusRHS;
43506 // Try to find umax(a,b) - b or a - umin(a,b) patterns;
43507 // they may be converted to subus(a,b).
43508 // TODO: Need to add IR canonicalization for this code.
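// For example:
//   (sub (umax a, b), b) --> (usubsat a, b)
//   (sub a, (umin a, b)) --> (usubsat a, b)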
43509 if (Op0.getOpcode() == ISD::UMAX) {
43511 SDValue MaxLHS = Op0.getOperand(0);
43512 SDValue MaxRHS = Op0.getOperand(1);
43515 else if (MaxRHS == Op1)
43519 } else if (Op1.getOpcode() == ISD::UMIN) {
43521 SDValue MinLHS = Op1.getOperand(0);
43522 SDValue MinRHS = Op1.getOperand(1);
43525 else if (MinRHS == Op0)
43532 auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43533 ArrayRef<SDValue> Ops) {
43534 return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops);
43537 // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
43538 // special preprocessing in some cases.
43539 if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64)
43540 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
43541 { SubusLHS, SubusRHS }, USUBSATBuilder);
43543 // The special preprocessing case can only be applied
43544 // if the value was zero extended from 16 bits,
43545 // so we require the top 16 bits to be zero for 32-bit
43546 // values, or the top 48 bits for 64-bit values.
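// For example (a sketch): for a v8i32 subtraction where the top 16 bits of the
// LHS are known zero, we clamp the RHS with umin(0xFFFF, rhs), truncate both
// sides to v8i16, emit the saturating subtract there, and zero extend the
// result back to v8i32.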
43547 KnownBits Known = DAG.computeKnownBits(SubusLHS);
43548 unsigned NumZeros = Known.countMinLeadingZeros();
43549 if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
43552 EVT ExtType = SubusLHS.getValueType();
43554 if (VT == MVT::v8i32 || VT == MVT::v8i64)
43555 ShrinkedType = MVT::v8i16;
43557 ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
43559 // If SubusLHS is zero extended, truncate SubusRHS to its
43560 // size: SubusRHS = umin(0xFFF.., SubusRHS).
43561 SDValue SaturationConst =
43562 DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
43563 ShrinkedType.getScalarSizeInBits()),
43564 SDLoc(SubusLHS), ExtType);
43565 SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
43567 SDValue NewSubusLHS =
43568 DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
43569 SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
43571 SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType,
43572 { NewSubusLHS, NewSubusRHS }, USUBSATBuilder);
43573 // Zero extend the result; it may be used somewhere as a 32-bit value.
43574 // If not, the zext and the following trunc will be folded away.
43575 return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
43578 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
43579 const X86Subtarget &Subtarget) {
43580 SDValue Op0 = N->getOperand(0);
43581 SDValue Op1 = N->getOperand(1);
43583 // X86 can't encode an immediate LHS of a sub. See if we can push the
43584 // negation into a preceding instruction.
43585 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
43586 // If the RHS of the sub is an XOR with one use and a constant, invert the
43587 // immediate. Then add one to the LHS of the sub so we can turn
43588 // X-Y -> X+~Y+1, saving one register.
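// For example: (10 - (x ^ 3)) --> ((x ^ ~3) + 11), since ~(x ^ 3) == (x ^ ~3)
// and -z == ~z + 1.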
43589 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
43590 isa<ConstantSDNode>(Op1.getOperand(1))) {
43591 const APInt &XorC = Op1.getConstantOperandAPInt(1);
43592 EVT VT = Op0.getValueType();
43593 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
43595 DAG.getConstant(~XorC, SDLoc(Op1), VT));
43596 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
43597 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
43601 // Try to synthesize horizontal subs from subs of shuffles.
43602 EVT VT = N->getValueType(0);
43603 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
43604 VT == MVT::v8i32) &&
43605 Subtarget.hasSSSE3() &&
43606 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
43607 auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43608 ArrayRef<SDValue> Ops) {
43609 return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
43611 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
43615 if (SDValue V = combineIncDecVector(N, DAG))
43618 // Try to create PSUBUS if SUB's argument is max/min
43619 if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
43622 return combineAddOrSubToADCOrSBB(N, DAG);
43625 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
43626 const X86Subtarget &Subtarget) {
43627 MVT VT = N->getSimpleValueType(0);
43630 if (N->getOperand(0) == N->getOperand(1)) {
43631 if (N->getOpcode() == X86ISD::PCMPEQ)
43632 return DAG.getConstant(-1, DL, VT);
43633 if (N->getOpcode() == X86ISD::PCMPGT)
43634 return DAG.getConstant(0, DL, VT);
43640 /// Helper that combines an array of subvector ops as if they were the operands
43641 /// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
43642 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
43643 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
43644 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
43645 TargetLowering::DAGCombinerInfo &DCI,
43646 const X86Subtarget &Subtarget) {
43647 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
43649 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
43650 return DAG.getUNDEF(VT);
43652 if (llvm::all_of(Ops, [](SDValue Op) {
43653 return ISD::isBuildVectorAllZeros(Op.getNode());
43655 return getZeroVector(VT, Subtarget, DAG, DL);
43657 SDValue Op0 = Ops[0];
43659 // Fold subvector loads into one.
43660 // If needed, look through bitcasts to get to the load.
43661 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
43663 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
43664 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
43665 *FirstLd->getMemOperand(), &Fast) &&
43668 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
43673 // Repeated subvectors.
43674 if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
43675 // If this broadcast/subv_broadcast is inserted into both halves, use a
43676 // larger broadcast/subv_broadcast.
43677 if (Op0.getOpcode() == X86ISD::VBROADCAST ||
43678 Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
43679 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
43681 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
43682 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
43683 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
43684 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
43685 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
43687 DAG.getIntPtrConstant(0, DL)));
43689 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
43690 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
43691 (Subtarget.hasAVX2() ||
43692 (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
43693 Op0.getOperand(0).getValueType() == VT.getScalarType())
43694 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
43697 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
43699 // Repeated opcode.
43700 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
43701 // but it currently struggles with different vector widths.
43702 if (llvm::all_of(Ops, [Op0](SDValue Op) {
43703 return Op.getOpcode() == Op0.getOpcode();
43705 unsigned NumOps = Ops.size();
43706 switch (Op0.getOpcode()) {
43707 case X86ISD::PSHUFHW:
43708 case X86ISD::PSHUFLW:
43709 case X86ISD::PSHUFD:
43710 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
43711 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
43712 SmallVector<SDValue, 2> Src;
43713 for (unsigned i = 0; i != NumOps; ++i)
43714 Src.push_back(Ops[i].getOperand(0));
43715 return DAG.getNode(Op0.getOpcode(), DL, VT,
43716 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
43717 Op0.getOperand(1));
43720 case X86ISD::VPERMILPI:
43721 // TODO - add support for vXf64/vXi64 shuffles.
43722 if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
43723 Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
43724 SmallVector<SDValue, 2> Src;
43725 for (unsigned i = 0; i != NumOps; ++i)
43726 Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
43727 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
43728 Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
43729 Op0.getOperand(1));
43730 return DAG.getBitcast(VT, Res);
43733 case X86ISD::PACKUS:
43734 if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
43735 SmallVector<SDValue, 2> LHS, RHS;
43736 for (unsigned i = 0; i != NumOps; ++i) {
43737 LHS.push_back(Ops[i].getOperand(0));
43738 RHS.push_back(Ops[i].getOperand(1));
43740 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
43741 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
43742 NumOps * SrcVT.getVectorNumElements());
43743 return DAG.getNode(Op0.getOpcode(), DL, VT,
43744 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
43745 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
43751 // If we're inserting all zeros into the upper half, change this to
43752 // an insert into an all zeros vector. We will match this to a move
43753 // with implicit upper bit zeroing during isel.
43754 if (Ops.size() == 2 && ISD::isBuildVectorAllZeros(Ops[1].getNode()))
43755 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
43756 getZeroVector(VT, Subtarget, DAG, DL), Ops[0],
43757 DAG.getIntPtrConstant(0, DL));
43762 static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
43763 TargetLowering::DAGCombinerInfo &DCI,
43764 const X86Subtarget &Subtarget) {
43765 EVT VT = N->getValueType(0);
43766 EVT SrcVT = N->getOperand(0).getValueType();
43767 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43769 // Don't do anything for i1 vectors.
43770 if (VT.getVectorElementType() == MVT::i1)
43773 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
43774 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
43775 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
43783 static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
43784 TargetLowering::DAGCombinerInfo &DCI,
43785 const X86Subtarget &Subtarget) {
43786 if (DCI.isBeforeLegalizeOps())
43789 MVT OpVT = N->getSimpleValueType(0);
43791 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
43794 SDValue Vec = N->getOperand(0);
43795 SDValue SubVec = N->getOperand(1);
43797 uint64_t IdxVal = N->getConstantOperandVal(2);
43798 MVT SubVecVT = SubVec.getSimpleValueType();
43800 if (Vec.isUndef() && SubVec.isUndef())
43801 return DAG.getUNDEF(OpVT);
43803 // Inserting undefs/zeros into zeros/undefs is a zero vector.
43804 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
43805 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
43806 return getZeroVector(OpVT, Subtarget, DAG, dl);
43808 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
43809 // If we're inserting into a zero vector and then into a larger zero vector,
43810 // just insert into the larger zero vector directly.
43811 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
43812 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
43813 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
43814 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
43815 getZeroVector(OpVT, Subtarget, DAG, dl),
43816 SubVec.getOperand(1),
43817 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
43820 // If we're inserting into a zero vector and our input was extracted from an
43821 // insert into a zero vector of the same type, and the extraction was at
43822 // least as large as the original insertion, just insert the original
43823 // subvector into a zero vector.
43824 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
43825 SubVec.getConstantOperandAPInt(1) == 0 &&
43826 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
43827 SDValue Ins = SubVec.getOperand(0);
43828 if (Ins.getConstantOperandAPInt(2) == 0 &&
43829 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
43830 Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
43831 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
43832 getZeroVector(OpVT, Subtarget, DAG, dl),
43833 Ins.getOperand(1), N->getOperand(2));
43837 // Stop here if this is an i1 vector.
43841 // If this is an insert of an extract, combine to a shuffle. Don't do this
43842 // if the insert or extract can be represented with a subregister operation.
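// For example (a sketch): inserting (extract_subvector %src, 4) at index 0 of a
// v8i32 %vec becomes a shuffle of %vec and %src with mask <12,13,14,15,4,5,6,7>
// (lanes of the second shuffle operand are numbered 8..15).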
43843 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
43844 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
43845 (IdxVal != 0 || !Vec.isUndef())) {
43846 int ExtIdxVal = SubVec.getConstantOperandVal(1);
43847 if (ExtIdxVal != 0) {
43848 int VecNumElts = OpVT.getVectorNumElements();
43849 int SubVecNumElts = SubVecVT.getVectorNumElements();
43850 SmallVector<int, 64> Mask(VecNumElts);
43851 // First create an identity shuffle mask.
43852 for (int i = 0; i != VecNumElts; ++i)
43854 // Now insert the extracted portion.
43855 for (int i = 0; i != SubVecNumElts; ++i)
43856 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
43858 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
43862 // Match concat_vector style patterns.
43863 SmallVector<SDValue, 2> SubVectorOps;
43864 if (collectConcatOps(N, SubVectorOps))
43866 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
43869 // If we are inserting into both halves of the vector, the starting vector
43870 // should be undef. If it isn't, make it so. Only do this if the early insert
43871 // has no other uses.
43872 // TODO: Should this be a generic DAG combine?
43873 // TODO: Why doesn't SimplifyDemandedVectorElts catch this?
43874 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
43875 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
43876 OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2 &&
43877 isNullConstant(Vec.getOperand(2)) && !Vec.getOperand(0).isUndef() &&
43878 Vec.getOperand(1).getValueSizeInBits() == SubVecVT.getSizeInBits() &&
43880 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT),
43881 Vec.getOperand(1), Vec.getOperand(2));
43882 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec, SubVec,
43886 // If this is a broadcast insert into an upper undef, use a larger broadcast.
43887 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
43888 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
43893 /// If we are extracting a subvector of a vector select and the select condition
43894 /// is composed of concatenated vectors, try to narrow the select width. This
43895 /// is a common pattern for AVX1 integer code because 256-bit selects may be
43896 /// legal, but there is almost no integer math/logic available for 256-bit.
43897 /// This function should only be called with legal types (otherwise, the calls
43898 /// to get simple value types will assert).
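///
/// A rough sketch of the transform (illustrative types):
///   (v4i32 extract_subvector (v8i32 vselect (concat C0, C1), T, F), 4)
/// becomes
///   (v4i32 vselect C1, (extract_subvector T, 4), (extract_subvector F, 4))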
43899 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
43900 SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
43901 SmallVector<SDValue, 4> CatOps;
43902 if (Sel.getOpcode() != ISD::VSELECT ||
43903 !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
43906 // Note: We assume simple value types because this should only be called with
43907 // legal operations/types.
43908 // TODO: This can be extended to handle extraction to 256-bits.
43909 MVT VT = Ext->getSimpleValueType(0);
43910 if (!VT.is128BitVector())
43913 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
43914 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
43917 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
43918 MVT SelVT = Sel.getSimpleValueType();
43919 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
43920 "Unexpected vector type with legal operations");
43922 unsigned SelElts = SelVT.getVectorNumElements();
43923 unsigned CastedElts = WideVT.getVectorNumElements();
43924 unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
43925 if (SelElts % CastedElts == 0) {
43926 // The select has the same or more (narrower) elements than the extract
43927 // operand. The extraction index gets scaled by that factor.
43928 ExtIdx *= (SelElts / CastedElts);
43929 } else if (CastedElts % SelElts == 0) {
43930 // The select has fewer (wider) elements than the extract operand. Make sure
43931 // that the extraction index can be divided evenly.
43932 unsigned IndexDivisor = CastedElts / SelElts;
43933 if (ExtIdx % IndexDivisor != 0)
43935 ExtIdx /= IndexDivisor;
43937 llvm_unreachable("Element count of simple vector types are not divisible?");
43940 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
43941 unsigned NarrowElts = SelElts / NarrowingFactor;
43942 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
43944 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
43945 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
43946 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
43947 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
43948 return DAG.getBitcast(VT, NarrowSel);
43951 static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
43952 TargetLowering::DAGCombinerInfo &DCI,
43953 const X86Subtarget &Subtarget) {
43954 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
43955 // eventually get combined/lowered into ANDNP) with a concatenated operand,
43956 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
43957 // We let generic combining take over from there to simplify the
43958 // insert/extract and 'not'.
43959 // This pattern emerges during AVX1 legalization. We handle it before lowering
43960 // to avoid complications like splitting constant vector loads.
43962 // Capture the original wide type in the likely case that we need to bitcast
43963 // back to this type.
43964 if (!N->getValueType(0).isSimple())
43967 MVT VT = N->getSimpleValueType(0);
43968 EVT WideVecVT = N->getOperand(0).getValueType();
43969 SDValue WideVec = peekThroughBitcasts(N->getOperand(0));
43970 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43971 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
43972 TLI.isTypeLegal(WideVecVT) &&
43973 WideVecVT.getSizeInBits() == 256 && WideVec.getOpcode() == ISD::AND) {
43974 auto isConcatenatedNot = [] (SDValue V) {
43975 V = peekThroughBitcasts(V);
43976 if (!isBitwiseNot(V))
43978 SDValue NotOp = V->getOperand(0);
43979 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
43981 if (isConcatenatedNot(WideVec.getOperand(0)) ||
43982 isConcatenatedNot(WideVec.getOperand(1))) {
43983 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
43984 SDValue Concat = split256IntArith(WideVec, DAG);
43985 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
43986 DAG.getBitcast(WideVecVT, Concat), N->getOperand(1));
43990 if (DCI.isBeforeLegalizeOps())
43993 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
43996 SDValue InVec = N->getOperand(0);
43997 unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
43999 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
44000 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
44002 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
44003 if (VT.getScalarType() == MVT::i1)
44004 return DAG.getConstant(1, SDLoc(N), VT);
44005 return getOnesVector(VT, DAG, SDLoc(N));
44008 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
44009 return DAG.getBuildVector(
44011 InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
44013 // Try to move vector bitcast after extract_subv by scaling extraction index:
44014 // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
44015 // TODO: Move this to DAGCombiner::visitEXTRACT_SUBVECTOR
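// For example: (v4i32 extract_subvector (v8i32 bitcast (v4i64 X)), 4)
// --> (v4i32 bitcast (v2i64 extract_subvector X, 2))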
44016 if (InVec.getOpcode() == ISD::BITCAST &&
44017 InVec.getOperand(0).getValueType().isVector()) {
44018 SDValue SrcOp = InVec.getOperand(0);
44019 EVT SrcVT = SrcOp.getValueType();
44020 unsigned SrcNumElts = SrcVT.getVectorNumElements();
44021 unsigned DestNumElts = InVec.getValueType().getVectorNumElements();
44022 if ((DestNumElts % SrcNumElts) == 0) {
44023 unsigned DestSrcRatio = DestNumElts / SrcNumElts;
44024 if ((VT.getVectorNumElements() % DestSrcRatio) == 0) {
44025 unsigned NewExtNumElts = VT.getVectorNumElements() / DestSrcRatio;
44026 EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(),
44027 SrcVT.getScalarType(), NewExtNumElts);
44028 if ((N->getConstantOperandVal(1) % DestSrcRatio) == 0 &&
44029 TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
44030 unsigned IndexValScaled = N->getConstantOperandVal(1) / DestSrcRatio;
44032 SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
44033 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
44035 return DAG.getBitcast(VT, NewExtract);
44041 // If we're extracting from a broadcast then we're better off just
44042 // broadcasting to the smaller type directly, assuming this is the only use.
44043 // As it's a broadcast we don't care about the extraction index.
44044 if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
44045 InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
44046 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
44048 // If we're extracting the lowest subvector and we're the only user,
44049 // we may be able to perform this with a smaller vector width.
44050 if (IdxVal == 0 && InVec.hasOneUse()) {
44051 unsigned InOpcode = InVec.getOpcode();
44052 if (VT == MVT::v2f64 && InVec.getValueType() == MVT::v4f64) {
44053 // v2f64 CVTDQ2PD(v4i32).
44054 if (InOpcode == ISD::SINT_TO_FP &&
44055 InVec.getOperand(0).getValueType() == MVT::v4i32) {
44056 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
44058 // v2f64 CVTUDQ2PD(v4i32).
44059 if (InOpcode == ISD::UINT_TO_FP &&
44060 InVec.getOperand(0).getValueType() == MVT::v4i32) {
44061 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
44063 // v2f64 CVTPS2PD(v4f32).
44064 if (InOpcode == ISD::FP_EXTEND &&
44065 InVec.getOperand(0).getValueType() == MVT::v4f32) {
44066 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
44069 if ((InOpcode == ISD::ANY_EXTEND ||
44070 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
44071 InOpcode == ISD::ZERO_EXTEND ||
44072 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
44073 InOpcode == ISD::SIGN_EXTEND ||
44074 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44075 VT.is128BitVector() &&
44076 InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
44077 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
44078 return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
44080 if (InOpcode == ISD::VSELECT &&
44081 InVec.getOperand(0).getValueType().is256BitVector() &&
44082 InVec.getOperand(1).getValueType().is256BitVector() &&
44083 InVec.getOperand(2).getValueType().is256BitVector()) {
44085 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
44086 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
44087 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
44088 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
44095 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
44096 EVT VT = N->getValueType(0);
44097 SDValue Src = N->getOperand(0);
44100 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
44101 // This occurs frequently in our masked scalar intrinsic code and our
44102 // floating point select lowering with AVX512.
44103 // TODO: SimplifyDemandedBits instead?
44104 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
44105 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
44106 if (C->getAPIntValue().isOneValue())
44107 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
44108 Src.getOperand(0));
44110 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
44111 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
44112 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
44113 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
44114 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
44115 if (C->isNullValue())
44116 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
44117 Src.getOperand(1));
44119 // Reduce v2i64 to v4i32 if we don't need the upper bits.
44120 // TODO: Move to DAGCombine?
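// For example: (v2i64 scalar_to_vector (i64 any_extend (i32 X)))
// --> (v2i64 bitcast (v4i32 scalar_to_vector X))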
44121 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
44122 Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
44123 Src.getOperand(0).getScalarValueSizeInBits() <= 32)
44124 return DAG.getBitcast(
44125 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
44126 DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));
44131 // Simplify PMULDQ and PMULUDQ operations.
44132 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
44133 TargetLowering::DAGCombinerInfo &DCI,
44134 const X86Subtarget &Subtarget) {
44135 SDValue LHS = N->getOperand(0);
44136 SDValue RHS = N->getOperand(1);
44138 // Canonicalize constant to RHS.
44139 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
44140 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
44141 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
44143 // Multiply by zero.
44144 // Don't return RHS as it may contain UNDEFs.
44145 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
44146 return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
44148 // Aggressively peek through ops to get at the demanded low bits.
44149 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
44150 SDValue DemandedLHS = DAG.GetDemandedBits(LHS, DemandedMask);
44151 SDValue DemandedRHS = DAG.GetDemandedBits(RHS, DemandedMask);
44152 if (DemandedLHS || DemandedRHS)
44153 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
44154 DemandedLHS ? DemandedLHS : LHS,
44155 DemandedRHS ? DemandedRHS : RHS);
44157 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
44158 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44159 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
44160 return SDValue(N, 0);
44165 static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
44166 TargetLowering::DAGCombinerInfo &DCI,
44167 const X86Subtarget &Subtarget) {
44168 EVT VT = N->getValueType(0);
44169 SDValue In = N->getOperand(0);
44170 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44172 // Try to merge vector loads and extend_inreg to an extload.
44173 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
44175 auto *Ld = cast<LoadSDNode>(In);
44176 if (!Ld->isVolatile()) {
44177 MVT SVT = In.getSimpleValueType().getVectorElementType();
44178 ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
44179 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
44180 VT.getVectorNumElements());
44181 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
44183 DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
44184 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
44185 Ld->getMemOperand()->getFlags());
44186 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
44192 // Disabled for widening legalization for now. We can enable this if we find a
44193 // case that needs it. Otherwise it can be deleted when we switch to
44194 // widening legalization.
44195 if (ExperimentalVectorWideningLegalization)
44198 // Combine (ext_invec (ext_invec X)) -> (ext_invec X)
44199 if (In.getOpcode() == N->getOpcode() &&
44200 TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getOperand(0).getValueType()))
44201 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, In.getOperand(0));
44203 // Attempt to combine as a shuffle.
44204 // TODO: SSE41 support
44205 if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
44207 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
44208 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
44215 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
44216 DAGCombinerInfo &DCI) const {
44217 SelectionDAG &DAG = DCI.DAG;
44218 switch (N->getOpcode()) {
44220 case ISD::SCALAR_TO_VECTOR:
44221 return combineScalarToVector(N, DAG);
44222 case ISD::EXTRACT_VECTOR_ELT:
44223 case X86ISD::PEXTRW:
44224 case X86ISD::PEXTRB:
44225 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
44226 case ISD::CONCAT_VECTORS:
44227 return combineConcatVectors(N, DAG, DCI, Subtarget);
44228 case ISD::INSERT_SUBVECTOR:
44229 return combineInsertSubvector(N, DAG, DCI, Subtarget);
44230 case ISD::EXTRACT_SUBVECTOR:
44231 return combineExtractSubvector(N, DAG, DCI, Subtarget);
44234 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
44235 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
44236 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
44237 case X86ISD::CMP: return combineCMP(N, DAG);
44238 case ISD::ADD: return combineAdd(N, DAG, Subtarget);
44239 case ISD::SUB: return combineSub(N, DAG, Subtarget);
44241 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
44242 case X86ISD::SBB: return combineSBB(N, DAG);
44243 case X86ISD::ADC: return combineADC(N, DAG, DCI);
44244 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
44245 case ISD::SHL: return combineShiftLeft(N, DAG);
44246 case ISD::SRA: return combineShiftRightArithmetic(N, DAG);
44247 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI);
44248 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
44249 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
44250 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
44251 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
44252 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
44253 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
44254 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
44255 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
44256 case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
44257 case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
44259 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
44260 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
44261 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
44262 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
44263 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
44264 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
44266 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
44268 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
44270 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
44271 case X86ISD::CVTSI2P:
44272 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
44273 case X86ISD::CVTP2SI:
44274 case X86ISD::CVTP2UI:
44275 case X86ISD::CVTTP2SI:
44276 case X86ISD::CVTTP2UI: return combineCVTP2I_CVTTP2I(N, DAG, DCI);
44277 case X86ISD::BT: return combineBT(N, DAG, DCI);
44278 case ISD::ANY_EXTEND:
44279 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
44280 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
44281 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
44282 case ISD::ANY_EXTEND_VECTOR_INREG:
44283 case ISD::SIGN_EXTEND_VECTOR_INREG:
44284 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
44286 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
44287 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
44288 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
44289 case X86ISD::PACKSS:
44290 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
44294 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
44295 case X86ISD::VSHLI:
44296 case X86ISD::VSRAI:
44297 case X86ISD::VSRLI:
44298 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
44299 case X86ISD::PINSRB:
44300 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
44301 case X86ISD::SHUFP: // Handle all target specific shuffles
44302 case X86ISD::INSERTPS:
44303 case X86ISD::EXTRQI:
44304 case X86ISD::INSERTQI:
44305 case X86ISD::PALIGNR:
44306 case X86ISD::VSHLDQ:
44307 case X86ISD::VSRLDQ:
44308 case X86ISD::BLENDI:
44309 case X86ISD::UNPCKH:
44310 case X86ISD::UNPCKL:
44311 case X86ISD::MOVHLPS:
44312 case X86ISD::MOVLHPS:
44313 case X86ISD::PSHUFB:
44314 case X86ISD::PSHUFD:
44315 case X86ISD::PSHUFHW:
44316 case X86ISD::PSHUFLW:
44317 case X86ISD::MOVSHDUP:
44318 case X86ISD::MOVSLDUP:
44319 case X86ISD::MOVDDUP:
44320 case X86ISD::MOVSS:
44321 case X86ISD::MOVSD:
44322 case X86ISD::VBROADCAST:
44323 case X86ISD::VPPERM:
44324 case X86ISD::VPERMI:
44325 case X86ISD::VPERMV:
44326 case X86ISD::VPERMV3:
44327 case X86ISD::VPERMIL2:
44328 case X86ISD::VPERMILPI:
44329 case X86ISD::VPERMILPV:
44330 case X86ISD::VPERM2X128:
44331 case X86ISD::SHUF128:
44332 case X86ISD::VZEXT_MOVL:
44333 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
44334 case X86ISD::FMADD_RND:
44335 case X86ISD::FMSUB:
44336 case X86ISD::FMSUB_RND:
44337 case X86ISD::FNMADD:
44338 case X86ISD::FNMADD_RND:
44339 case X86ISD::FNMSUB:
44340 case X86ISD::FNMSUB_RND:
44341 case ISD::FMA: return combineFMA(N, DAG, Subtarget);
44342 case X86ISD::FMADDSUB_RND:
44343 case X86ISD::FMSUBADD_RND:
44344 case X86ISD::FMADDSUB:
44345 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget);
44346 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI);
44347 case X86ISD::MGATHER:
44348 case X86ISD::MSCATTER:
44350 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
44351 case X86ISD::PCMPEQ:
44352 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
44353 case X86ISD::PMULDQ:
44354 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
44360 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
44361 if (!isTypeLegal(VT))
44364 // There are no vXi8 shifts.
44365 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
44368 // TODO: Almost no 8-bit ops are desirable because they have no actual
44369 // size/speed advantages vs. 32-bit ops, but they do have a major
44370 // potential disadvantage by causing partial register stalls.
44372 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
44373 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
44374 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
44375 // check for a constant operand to the multiply.
44376 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
44379 // i16 instruction encodings are longer and some i16 instructions are slow,
44380 // so those are not desirable.
44381 if (VT == MVT::i16) {
44386 case ISD::SIGN_EXTEND:
44387 case ISD::ZERO_EXTEND:
44388 case ISD::ANY_EXTEND:
44402 // Any legal type not explicitly accounted for above here is desirable.
44406 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
44407 SDValue Value, SDValue Addr,
44408 SelectionDAG &DAG) const {
44409 const Module *M = DAG.getMachineFunction().getMMI().getModule();
44410 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
44411 if (IsCFProtectionSupported) {
44412 // When control-flow branch protection is enabled, we need to add a
44413 // notrack prefix to the indirect branch.
44414 // To do that we create an NT_BRIND SDNode.
44415 // Upon ISel, the pattern will convert it to a jmp with the NoTrack prefix.
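// For example (illustrative), a jump-table dispatch that would otherwise be
// emitted as "jmpq *%rax" becomes "notrack jmpq *%rax", so CET indirect-branch
// tracking does not require ENDBR instructions at the jump-table targets.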
44416 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
44419 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
44422 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
44423 EVT VT = Op.getValueType();
44424 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
44425 isa<ConstantSDNode>(Op.getOperand(1));
44427 // i16 is legal, but undesirable since i16 instruction encodings are longer
44428 // and some i16 instructions are slow.
44429 // 8-bit multiply-by-constant can usually be expanded to something cheaper
44430 // using LEA and/or other ALU ops.
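// For example (illustrative), an i8 multiply by 5 is promoted here so that it
// can later be selected as "leal (%reg,%reg,4), %reg" instead of an 8-bit
// multiply that writes a partial register.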
44431 if (VT != MVT::i16 && !Is8BitMulByConstant)
44434 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
44435 if (!Op.hasOneUse())
44437 SDNode *User = *Op->use_begin();
44438 if (!ISD::isNormalStore(User))
44440 auto *Ld = cast<LoadSDNode>(Load);
44441 auto *St = cast<StoreSDNode>(User);
44442 return Ld->getBasePtr() == St->getBasePtr();
44445 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
44446 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
44448 if (!Op.hasOneUse())
44450 SDNode *User = *Op->use_begin();
44451 if (User->getOpcode() != ISD::ATOMIC_STORE)
44453 auto *Ld = cast<AtomicSDNode>(Load);
44454 auto *St = cast<AtomicSDNode>(User);
44455 return Ld->getBasePtr() == St->getBasePtr();
44458 bool Commute = false;
44459 switch (Op.getOpcode()) {
44460 default: return false;
44461 case ISD::SIGN_EXTEND:
44462 case ISD::ZERO_EXTEND:
44463 case ISD::ANY_EXTEND:
44468 SDValue N0 = Op.getOperand(0);
44469 // Look out for (store (shl (load), x)).
44470 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
44482 SDValue N0 = Op.getOperand(0);
44483 SDValue N1 = Op.getOperand(1);
44484 // Avoid disabling potential load folding opportunities.
44485 if (MayFoldLoad(N1) &&
44486 (!Commute || !isa<ConstantSDNode>(N0) ||
44487 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
44489 if (MayFoldLoad(N0) &&
44490 ((Commute && !isa<ConstantSDNode>(N1)) ||
44491 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
44493 if (IsFoldableAtomicRMW(N0, Op) ||
44494 (Commute && IsFoldableAtomicRMW(N1, Op)))
44503 bool X86TargetLowering::
44504 isDesirableToCombineBuildVectorToShuffleTruncate(
44505 ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
44507 assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
44508 "Element count mismatch");
44510 assert(Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
44511 "Shuffle Mask expected to be legal");
44513 // For 32-bit elements, VPERMD is better than shuffle+truncate.
44514 // TODO: After we improve lowerBuildVector, add an exception for VPERMW.
44515 if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
44518 if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
44524 //===----------------------------------------------------------------------===//
44525 // X86 Inline Assembly Support
44526 //===----------------------------------------------------------------------===//
44528 // Helper to match an asm string against a list of whitespace-separated pieces.
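// For example (illustrative), matchAsm("  bswap $0", {"bswap", "$0"}) succeeds,
// while matchAsm("bswapl $0", {"bswap", "$0"}) fails because only a prefix of
// the first word matches.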
44529 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
44530 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
44532 for (StringRef Piece : Pieces) {
44533 if (!S.startswith(Piece)) // Check if the piece matches.
44536 S = S.substr(Piece.size());
44537 StringRef::size_type Pos = S.find_first_not_of(" \t");
44538 if (Pos == 0) // We matched a prefix.
44547 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
44549 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
44550 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
44551 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
44552 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
44554 if (AsmPieces.size() == 3)
44556 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
44563 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
44564 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
44566 const std::string &AsmStr = IA->getAsmString();
44568 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
44569 if (!Ty || Ty->getBitWidth() % 16 != 0)
44572 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
44573 SmallVector<StringRef, 4> AsmPieces;
44574 SplitString(AsmStr, AsmPieces, ";\n");
44576 switch (AsmPieces.size()) {
44577 default: return false;
44579 // FIXME: this should verify that we are targeting a 486 or better. If not,
44580 // we will turn this bswap into something that will be lowered to logical
44581 // ops instead of emitting the bswap asm. For now, we don't support 486 or
44582 // lower so don't worry about this.
44584 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
44585 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
44586 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
44587 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
44588 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
44589 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
44590 // No need to check constraints, nothing other than the equivalent of
44591 // "=r,0" would be valid here.
44592 return IntrinsicLowering::LowerToByteSwap(CI);
44595 // rorw $$8, ${0:w} --> llvm.bswap.i16
44596 if (CI->getType()->isIntegerTy(16) &&
44597 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
44598 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
44599 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
44601 StringRef ConstraintsStr = IA->getConstraintString();
44602 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
44603 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
44604 if (clobbersFlagRegisters(AsmPieces))
44605 return IntrinsicLowering::LowerToByteSwap(CI);
44609 if (CI->getType()->isIntegerTy(32) &&
44610 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
44611 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
44612 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
44613 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
44615 StringRef ConstraintsStr = IA->getConstraintString();
44616 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
44617 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
44618 if (clobbersFlagRegisters(AsmPieces))
44619 return IntrinsicLowering::LowerToByteSwap(CI);
44622 if (CI->getType()->isIntegerTy(64)) {
44623 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
44624 if (Constraints.size() >= 2 &&
44625 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
44626 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
44627 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
44628 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
44629 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
44630 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
44631 return IntrinsicLowering::LowerToByteSwap(CI);
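// Map the "{@cc<cond>}" flag-output constraint strings used by GCC-style
// inline asm onto X86 condition codes. For example (illustrative):
//   bool Equal;
//   asm("cmpl %2, %1" : "=@ccz"(Equal) : "r"(A), "r"(B));
// reaches this function as the constraint "{@ccz}" and maps to X86::COND_E.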
44639 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
44640 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
44641 .Case("{@cca}", X86::COND_A)
44642 .Case("{@ccae}", X86::COND_AE)
44643 .Case("{@ccb}", X86::COND_B)
44644 .Case("{@ccbe}", X86::COND_BE)
44645 .Case("{@ccc}", X86::COND_B)
44646 .Case("{@cce}", X86::COND_E)
44647 .Case("{@ccz}", X86::COND_E)
44648 .Case("{@ccg}", X86::COND_G)
44649 .Case("{@ccge}", X86::COND_GE)
44650 .Case("{@ccl}", X86::COND_L)
44651 .Case("{@ccle}", X86::COND_LE)
44652 .Case("{@ccna}", X86::COND_BE)
44653 .Case("{@ccnae}", X86::COND_B)
44654 .Case("{@ccnb}", X86::COND_AE)
44655 .Case("{@ccnbe}", X86::COND_A)
44656 .Case("{@ccnc}", X86::COND_AE)
44657 .Case("{@ccne}", X86::COND_NE)
44658 .Case("{@ccnz}", X86::COND_NE)
44659 .Case("{@ccng}", X86::COND_LE)
44660 .Case("{@ccnge}", X86::COND_L)
44661 .Case("{@ccnl}", X86::COND_GE)
44662 .Case("{@ccnle}", X86::COND_G)
44663 .Case("{@ccno}", X86::COND_NO)
44664 .Case("{@ccnp}", X86::COND_P)
44665 .Case("{@ccns}", X86::COND_NS)
44666 .Case("{@cco}", X86::COND_O)
44667 .Case("{@ccp}", X86::COND_P)
44668 .Case("{@ccs}", X86::COND_S)
44669 .Default(X86::COND_INVALID);
44673 /// Given a constraint letter, return the type of constraint for this target.
44674 X86TargetLowering::ConstraintType
44675 X86TargetLowering::getConstraintType(StringRef Constraint) const {
44676 if (Constraint.size() == 1) {
44677 switch (Constraint[0]) {
44689 case 'k': // AVX512 masking registers.
44690 return C_RegisterClass;
44706 return C_Immediate;
44715 else if (Constraint.size() == 2) {
44716 switch (Constraint[0]) {
44720 switch (Constraint[1]) {
44731 return C_RegisterClass;
44734 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
44736 return TargetLowering::getConstraintType(Constraint);
44739 /// Examine constraint type and operand type and determine a weight value.
44740 /// This object must already have been set up with the operand type
44741 /// and the current alternative constraint selected.
44742 TargetLowering::ConstraintWeight
44743 X86TargetLowering::getSingleConstraintMatchWeight(
44744 AsmOperandInfo &info, const char *constraint) const {
44745 ConstraintWeight weight = CW_Invalid;
44746 Value *CallOperandVal = info.CallOperandVal;
44747 // If we don't have a value, we can't do a match,
44748 // but allow it at the lowest weight.
44749 if (!CallOperandVal)
44751 Type *type = CallOperandVal->getType();
44752 // Look at the constraint type.
44753 switch (*constraint) {
44755 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
44767 if (CallOperandVal->getType()->isIntegerTy())
44768 weight = CW_SpecificReg;
44773 if (type->isFloatingPointTy())
44774 weight = CW_SpecificReg;
44777 if (type->isX86_MMXTy() && Subtarget.hasMMX())
44778 weight = CW_SpecificReg;
44781 unsigned Size = StringRef(constraint).size();
44782 // Pick 'i' as the next char, since 'Y' and 'Yi' are synonymous when matching 'Y'.
44783 char NextChar = Size == 2 ? constraint[1] : 'i';
44786 switch (NextChar) {
44792 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
44793 return CW_SpecificReg;
44795 // Conditional OpMask regs (AVX512)
44797 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
44798 return CW_Register;
44802 if (type->isX86_MMXTy() && Subtarget.hasMMX())
44805 // Any SSE reg when ISA >= SSE2, same as 'Y'
44809 if (!Subtarget.hasSSE2())
44813 // Fall through (handle "Y" constraint).
44817 if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
44818 weight = CW_Register;
44821 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
44822 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
44823 weight = CW_Register;
44826 // Enable conditional vector operations using %k<#> registers.
44827 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
44828 weight = CW_Register;
44831 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
44832 if (C->getZExtValue() <= 31)
44833 weight = CW_Constant;
44837 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44838 if (C->getZExtValue() <= 63)
44839 weight = CW_Constant;
44843 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44844 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
44845 weight = CW_Constant;
44849 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44850 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
44851 weight = CW_Constant;
44855 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44856 if (C->getZExtValue() <= 3)
44857 weight = CW_Constant;
44861 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44862 if (C->getZExtValue() <= 0xff)
44863 weight = CW_Constant;
44868 if (isa<ConstantFP>(CallOperandVal)) {
44869 weight = CW_Constant;
44873 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44874 if ((C->getSExtValue() >= -0x80000000LL) &&
44875 (C->getSExtValue() <= 0x7fffffffLL))
44876 weight = CW_Constant;
44880 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
44881 if (C->getZExtValue() <= 0xffffffff)
44882 weight = CW_Constant;
44889 /// Try to replace an X constraint, which matches anything, with another that
44890 /// has more specific requirements based on the type of the corresponding operand.
44892 const char *X86TargetLowering::
44893 LowerXConstraint(EVT ConstraintVT) const {
44894 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
44895 // 'f' like normal targets.
44896 if (ConstraintVT.isFloatingPoint()) {
44897 if (Subtarget.hasSSE2())
44899 if (Subtarget.hasSSE1())
44903 return TargetLowering::LowerXConstraint(ConstraintVT);
44906 // Lower @cc targets via setcc.
44907 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
44908 SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
44909 SelectionDAG &DAG) const {
44910 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
44911 if (Cond == X86::COND_INVALID)
44913 // Check that return type is valid.
44914 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
44915 OpInfo.ConstraintVT.getSizeInBits() < 8)
44916 report_fatal_error("Flag output operand is of invalid type");
44918 // Get EFLAGS register. Only update chain when copyfrom is glued.
44919 if (Flag.getNode()) {
44920 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
44921 Chain = Flag.getValue(1);
44923 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
44924 // Extract CC code.
44925 SDValue CC = getSETCC(Cond, Flag, DL, DAG);
44926 // Zero-extend the i8 SETCC result to the width of the constraint type.
44927 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
44932 /// Lower the specified operand into the Ops vector.
44933 /// If it is invalid, don't add anything to Ops.
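/// For example (illustrative), the 'I' constraint only accepts constants in
/// [0, 31], so for asm("shll %1, %0" : "+r"(Val) : "I"(Amt)) a target constant
/// is appended to Ops only when Amt folds to an in-range immediate; otherwise
/// Ops is left empty and the caller diagnoses the operand.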
44934 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
44935 std::string &Constraint,
44936 std::vector<SDValue>&Ops,
44937 SelectionDAG &DAG) const {
44940 // Only support length 1 constraints for now.
44941 if (Constraint.length() > 1) return;
44943 char ConstraintLetter = Constraint[0];
44944 switch (ConstraintLetter) {
44947 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44948 if (C->getZExtValue() <= 31) {
44949 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44950 Op.getValueType());
44956 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44957 if (C->getZExtValue() <= 63) {
44958 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44959 Op.getValueType());
44965 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44966 if (isInt<8>(C->getSExtValue())) {
44967 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44968 Op.getValueType());
44974 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44975 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
44976 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
44977 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
44978 Op.getValueType());
44984 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44985 if (C->getZExtValue() <= 3) {
44986 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44987 Op.getValueType());
44993 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
44994 if (C->getZExtValue() <= 255) {
44995 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
44996 Op.getValueType());
45002 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45003 if (C->getZExtValue() <= 127) {
45004 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45005 Op.getValueType());
45011 // 32-bit signed value
45012 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45013 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
45014 C->getSExtValue())) {
45015 // Widen to 64 bits here to get it sign extended.
45016 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
45019 // FIXME gcc accepts some relocatable values here too, but only in certain
45020 // memory models; it's complicated.
45025 // 32-bit unsigned value
45026 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45027 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
45028 C->getZExtValue())) {
45029 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45030 Op.getValueType());
45034 // FIXME gcc accepts some relocatable values here too, but only in certain
45035 // memory models; it's complicated.
45039 // Literal immediates are always ok.
45040 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
45041 bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
45042 BooleanContent BCont = getBooleanContents(MVT::i64);
45043 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
45044 : ISD::SIGN_EXTEND;
45045 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
45046 : CST->getSExtValue();
45047 Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
45051 // In any sort of PIC mode addresses need to be computed at runtime by
45052 // adding in a register or some sort of table lookup. These can't
45053 // be used as immediates.
45054 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
45057 // If we are in non-pic codegen mode, we allow the address of a global (with
45058 // an optional displacement) to be used with 'i'.
45059 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
45060 // If we require an extra load to get this address, as in PIC mode, we
45061 // can't accept it.
45062 if (isGlobalStubReference(
45063 Subtarget.classifyGlobalReference(GA->getGlobal())))
45069 if (Result.getNode()) {
45070 Ops.push_back(Result);
45073 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
45076 /// Check if \p RC is a general purpose register class.
45077 /// I.e., GR* or one of their variants.
45078 static bool isGRClass(const TargetRegisterClass &RC) {
45079 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
45080 RC.hasSuperClassEq(&X86::GR16RegClass) ||
45081 RC.hasSuperClassEq(&X86::GR32RegClass) ||
45082 RC.hasSuperClassEq(&X86::GR64RegClass) ||
45083 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
45086 /// Check if \p RC is a vector register class.
45087 /// I.e., FR* / VR* or one of their variants.
45088 static bool isFRClass(const TargetRegisterClass &RC) {
45089 return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
45090 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
45091 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
45092 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
45093 RC.hasSuperClassEq(&X86::VR512RegClass);
45096 /// Check if \p RC is a mask register class.
45097 /// I.e., VK* or one of their variants.
45098 static bool isVKClass(const TargetRegisterClass &RC) {
45099 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
45100 RC.hasSuperClassEq(&X86::VK2RegClass) ||
45101 RC.hasSuperClassEq(&X86::VK4RegClass) ||
45102 RC.hasSuperClassEq(&X86::VK8RegClass) ||
45103 RC.hasSuperClassEq(&X86::VK16RegClass) ||
45104 RC.hasSuperClassEq(&X86::VK32RegClass) ||
45105 RC.hasSuperClassEq(&X86::VK64RegClass);
45108 std::pair<unsigned, const TargetRegisterClass *>
45109 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
45110 StringRef Constraint,
45112 // First, see if this is a constraint that directly corresponds to an LLVM
45114 if (Constraint.size() == 1) {
45115 // GCC Constraint Letters
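// For example (illustrative), asm("addl %1, %0" : "+r"(Sum) : "q"(Val))
// requests any 32-bit GPR for %0, while "q" restricts %1 to EAX/EBX/ECX/EDX
// in 32-bit mode (any GPR in 64-bit mode), as handled below.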
45116 switch (Constraint[0]) {
45118 // 'A' means [ER]AX + [ER]DX.
45120 if (Subtarget.is64Bit())
45121 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
45122 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
45123 "Expecting 64, 32 or 16 bit subtarget");
45124 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45126 // TODO: Slight differences here in allocation order and leaving
45127 // RIP in the class. Do they matter any more here than they do
45128 // in the normal allocation?
45130 if (Subtarget.hasAVX512()) {
45132 return std::make_pair(0U, &X86::VK1RegClass);
45134 return std::make_pair(0U, &X86::VK8RegClass);
45135 if (VT == MVT::i16)
45136 return std::make_pair(0U, &X86::VK16RegClass);
45138 if (Subtarget.hasBWI()) {
45139 if (VT == MVT::i32)
45140 return std::make_pair(0U, &X86::VK32RegClass);
45141 if (VT == MVT::i64)
45142 return std::make_pair(0U, &X86::VK64RegClass);
45145 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
45146 if (Subtarget.is64Bit()) {
45147 if (VT == MVT::i32 || VT == MVT::f32)
45148 return std::make_pair(0U, &X86::GR32RegClass);
45149 if (VT == MVT::i16)
45150 return std::make_pair(0U, &X86::GR16RegClass);
45151 if (VT == MVT::i8 || VT == MVT::i1)
45152 return std::make_pair(0U, &X86::GR8RegClass);
45153 if (VT == MVT::i64 || VT == MVT::f64)
45154 return std::make_pair(0U, &X86::GR64RegClass);
45158 // 32-bit fallthrough
45159 case 'Q': // Q_REGS
45160 if (VT == MVT::i32 || VT == MVT::f32)
45161 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
45162 if (VT == MVT::i16)
45163 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
45164 if (VT == MVT::i8 || VT == MVT::i1)
45165 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
45166 if (VT == MVT::i64)
45167 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
45169 case 'r': // GENERAL_REGS
45170 case 'l': // INDEX_REGS
45171 if (VT == MVT::i8 || VT == MVT::i1)
45172 return std::make_pair(0U, &X86::GR8RegClass);
45173 if (VT == MVT::i16)
45174 return std::make_pair(0U, &X86::GR16RegClass);
45175 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
45176 return std::make_pair(0U, &X86::GR32RegClass);
45177 return std::make_pair(0U, &X86::GR64RegClass);
45178 case 'R': // LEGACY_REGS
45179 if (VT == MVT::i8 || VT == MVT::i1)
45180 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
45181 if (VT == MVT::i16)
45182 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
45183 if (VT == MVT::i32 || !Subtarget.is64Bit())
45184 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
45185 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
45186 case 'f': // FP Stack registers.
45187 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
45188 // value to the correct fpstack register class.
45189 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
45190 return std::make_pair(0U, &X86::RFP32RegClass);
45191 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
45192 return std::make_pair(0U, &X86::RFP64RegClass);
45193 return std::make_pair(0U, &X86::RFP80RegClass);
45194 case 'y': // MMX_REGS if MMX allowed.
45195 if (!Subtarget.hasMMX()) break;
45196 return std::make_pair(0U, &X86::VR64RegClass);
45197 case 'Y': // SSE_REGS if SSE2 allowed
45198 if (!Subtarget.hasSSE2()) break;
45201 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
45202 if (!Subtarget.hasSSE1()) break;
45203 bool VConstraint = (Constraint[0] == 'v');
45205 switch (VT.SimpleTy) {
45207 // Scalar SSE types.
45210 if (VConstraint && Subtarget.hasVLX())
45211 return std::make_pair(0U, &X86::FR32XRegClass);
45212 return std::make_pair(0U, &X86::FR32RegClass);
45215 if (VConstraint && Subtarget.hasVLX())
45216 return std::make_pair(0U, &X86::FR64XRegClass);
45217 return std::make_pair(0U, &X86::FR64RegClass);
45218 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
45226 if (VConstraint && Subtarget.hasVLX())
45227 return std::make_pair(0U, &X86::VR128XRegClass);
45228 return std::make_pair(0U, &X86::VR128RegClass);
45236 if (VConstraint && Subtarget.hasVLX())
45237 return std::make_pair(0U, &X86::VR256XRegClass);
45238 if (Subtarget.hasAVX())
45239 return std::make_pair(0U, &X86::VR256RegClass);
45245 if (!Subtarget.hasAVX512()) break;
45247 return std::make_pair(0U, &X86::VR512RegClass);
45248 return std::make_pair(0U, &X86::VR512_0_15RegClass);
45252 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
45253 switch (Constraint[1]) {
45259 return getRegForInlineAsmConstraint(TRI, "Y", VT);
45261 if (!Subtarget.hasMMX()) break;
45262 return std::make_pair(0U, &X86::VR64RegClass);
45265 if (!Subtarget.hasSSE1()) break;
45266 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
45268 // This register class doesn't allocate k0 for masked vector operations.
45269 if (Subtarget.hasAVX512()) {
45271 return std::make_pair(0U, &X86::VK1WMRegClass);
45273 return std::make_pair(0U, &X86::VK8WMRegClass);
45274 if (VT == MVT::i16)
45275 return std::make_pair(0U, &X86::VK16WMRegClass);
45277 if (Subtarget.hasBWI()) {
45278 if (VT == MVT::i32)
45279 return std::make_pair(0U, &X86::VK32WMRegClass);
45280 if (VT == MVT::i64)
45281 return std::make_pair(0U, &X86::VK64WMRegClass);
45287 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
45288 return std::make_pair(0U, &X86::GR32RegClass);
45290 // Use the default implementation in TargetLowering to convert the register
45291 // constraint into a member of a register class.
45292 std::pair<unsigned, const TargetRegisterClass*> Res;
45293 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
45295 // Not found as a standard register?
45297 // Map {st(0)} .. {st(7)} to the corresponding FP stack registers.
45298 if (Constraint.size() == 7 && Constraint[0] == '{' &&
45299 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
45300 Constraint[3] == '(' &&
45301 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
45302 Constraint[5] == ')' && Constraint[6] == '}') {
45303 // st(7) is not allocatable and thus not a member of RFP80. Return a
45304 // singleton class in cases where we have a reference to it.
45305 if (Constraint[4] == '7')
45306 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
45307 return std::make_pair(X86::FP0 + Constraint[4] - '0',
45308 &X86::RFP80RegClass);
45311 // GCC allows "st(0)" to be called just plain "st".
45312 if (StringRef("{st}").equals_lower(Constraint))
45313 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
45316 if (StringRef("{flags}").equals_lower(Constraint))
45317 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
45320 if (StringRef("{dirflag}").equals_lower(Constraint))
45321 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
45324 if (StringRef("{fpsr}").equals_lower(Constraint))
45325 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
45330 // Make sure it isn't a register that requires 64-bit mode.
45331 if (!Subtarget.is64Bit() &&
45332 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
45333 TRI->getEncodingValue(Res.first) >= 8) {
45334 // Register requires REX prefix, but we're in 32-bit mode.
45335 return std::make_pair(0, nullptr);
45338 // Make sure it isn't a register that requires AVX512.
45339 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
45340 TRI->getEncodingValue(Res.first) & 0x10) {
45341 // Register requires EVEX prefix.
45342 return std::make_pair(0, nullptr);
45345 // Otherwise, check to see if this is a register class of the wrong value
45346 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
45347 // turn into {ax},{dx}.
45348 // MVT::Other is used to specify clobber names.
45349 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
45350 return Res; // Correct type already, nothing to do.
45352 // Get a matching integer of the correct size, i.e., "ax" with MVT::i32 should
45353 // return "eax". This should even work for things like getting 64-bit integer
45354 // registers when given an f64 type.
45355 const TargetRegisterClass *Class = Res.second;
45356 // The generic code will match the first register class that contains the
45357 // given register. Thus, based on the ordering of the tablegened file,
45358 // the "plain" GR classes might not come first.
45359 // Therefore, use a helper method.
45360 if (isGRClass(*Class)) {
45361 unsigned Size = VT.getSizeInBits();
45362 if (Size == 1) Size = 8;
45363 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
45365 bool is64Bit = Subtarget.is64Bit();
45366 const TargetRegisterClass *RC =
45367 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
45368 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
45369 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
45370 : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
45372 if (Size == 64 && !is64Bit) {
45373 // Model GCC's behavior here and select a fixed pair of 32-bit registers.
45377 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45379 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
45381 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
45383 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
45385 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
45387 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
45389 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
45391 return std::make_pair(0, nullptr);
45394 if (RC && RC->contains(DestReg))
45395 return std::make_pair(DestReg, RC);
45398 // No register found/type mismatch.
45399 return std::make_pair(0, nullptr);
45400 } else if (isFRClass(*Class)) {
45401 // Handle references to XMM physical registers that got mapped into the
45402 // wrong class. This can happen with constraints like {xmm0} where the
45403 // target independent register mapper will just pick the first match it can
45404 // find, ignoring the required type.
45406 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
45407 if (VT == MVT::f32 || VT == MVT::i32)
45408 Res.second = &X86::FR32XRegClass;
45409 else if (VT == MVT::f64 || VT == MVT::i64)
45410 Res.second = &X86::FR64XRegClass;
45411 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
45412 Res.second = &X86::VR128XRegClass;
45413 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
45414 Res.second = &X86::VR256XRegClass;
45415 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
45416 Res.second = &X86::VR512RegClass;
45418 // Type mismatch and not a clobber: return an error.
45420 Res.second = nullptr;
45422 } else if (isVKClass(*Class)) {
45424 Res.second = &X86::VK1RegClass;
45425 else if (VT == MVT::i8)
45426 Res.second = &X86::VK8RegClass;
45427 else if (VT == MVT::i16)
45428 Res.second = &X86::VK16RegClass;
45429 else if (VT == MVT::i32)
45430 Res.second = &X86::VK32RegClass;
45431 else if (VT == MVT::i64)
45432 Res.second = &X86::VK64RegClass;
45434 // Type mismatch and not a clobber: return an error.
45436 Res.second = nullptr;
45443 int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
45444 const AddrMode &AM, Type *Ty,
45445 unsigned AS) const {
45446 // Scaling factors are not free at all.
45447 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
45448 // will take 2 allocations in the out-of-order engine instead of 1
45449 // for the plain addressing mode, i.e., inst (reg1).
45451 // vaddps (%rsi,%rdx), %ymm0, %ymm1
45452 // Requires two allocations (one for the load, one for the computation)
45454 // vaddps (%rsi), %ymm0, %ymm1
45455 // Requires just 1 allocation, i.e., freeing allocations for other operations
45456 // and having less micro operations to execute.
45458 // On some X86 microarchitectures this is even worse because, for stores,
45459 // the complex addressing mode forces the instruction to use the
45460 // "load" ports instead of the dedicated "store" port.
45461 // E.g., on Haswell:
45462 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
45463 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
45464 if (isLegalAddressingMode(DL, AM, Ty, AS))
45465 // Scale represents reg2 * scale, thus account for 1
45466 // as soon as we use a second register.
45467 return AM.Scale != 0;
45471 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
45472 // Integer division on x86 is expensive. However, when aggressively optimizing
45473 // for code size, we prefer to use a div instruction, as it is usually smaller
45474 // than the alternative sequence.
45475 // The exception to this is vector division. Since x86 doesn't have vector
45476 // integer division, leaving the division as-is is a loss even in terms of
45477 // size, because it will have to be scalarized, while the alternative code
45478 // sequence can be performed in vector form.
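// For example (illustrative), at -Os a scalar "x / 10" is kept as a single
// div instruction, whereas otherwise it is normally expanded into a
// multiply-by-magic-constant plus shifts, which is faster but larger.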
45480 bool OptSize = Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
45481 return OptSize && !VT.isVector();
45484 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
45485 if (!Subtarget.is64Bit())
45488 // Update IsSplitCSR in X86MachineFunctionInfo.
45489 X86MachineFunctionInfo *AFI =
45490 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
45491 AFI->setIsSplitCSR(true);
45494 void X86TargetLowering::insertCopiesSplitCSR(
45495 MachineBasicBlock *Entry,
45496 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
45497 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
45498 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
45502 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
45503 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
45504 MachineBasicBlock::iterator MBBI = Entry->begin();
45505 for (const MCPhysReg *I = IStart; *I; ++I) {
45506 const TargetRegisterClass *RC = nullptr;
45507 if (X86::GR64RegClass.contains(*I))
45508 RC = &X86::GR64RegClass;
45510 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
45512 unsigned NewVR = MRI->createVirtualRegister(RC);
45513 // Create copy from CSR to a virtual register.
45514 // FIXME: this currently does not emit CFI pseudo-instructions, it works
45515 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
45516 // nounwind. If we want to generalize this later, we may need to emit
45517 // CFI pseudo-instructions.
45519 Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
45520 "Function should be nounwind in insertCopiesSplitCSR!");
45521 Entry->addLiveIn(*I);
45522 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
45525 // Insert the copy-back instructions right before the terminator.
45526 for (auto *Exit : Exits)
45527 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
45528 TII->get(TargetOpcode::COPY), *I)
45533 bool X86TargetLowering::supportSwiftError() const {
45534 return Subtarget.is64Bit();
45537 /// Returns the name of the symbol used to emit stack probes or the empty
45538 /// string if not applicable.
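/// For example (illustrative), 64-bit MinGW targets get "___chkstk_ms" and
/// 64-bit MSVC targets get "__chkstk", while a function carrying the IR
/// attribute "probe-stack"="foo" uses "foo" regardless of target.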
45540 X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
45541 // If the function specifically requests stack probes, emit them.
45542 if (MF.getFunction().hasFnAttribute("probe-stack"))
45543 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
45545 // Generally, if we aren't on Windows, the platform ABI does not include
45546 // support for stack probes, so don't emit them.
45547 if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
45548 MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
45551 // We need a stack probe to conform to the Windows ABI. Choose the right symbol.
45553 if (Subtarget.is64Bit())
45554 return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
45555 return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";