//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<int> ExperimentalPrefLoopAlignment(
    "x86-experimental-pref-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes)"
        "(the last x86-experimental-pref-loop-alignment bits"
        " of the loop header PC will be 0)."),
    cl::Hidden);
static cl::opt<bool> EnableOldKNLABI(
    "x86-enable-old-knl-abi", cl::init(false),
    cl::desc("Enables passing v32i16 and v64i8 in 2 YMM registers instead of "
             "one ZMM register on AVX512F, but not AVX512BW targets."),
    cl::Hidden);
static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);
static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);
/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
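
// Illustrative usage only (the real call sites live elsewhere in this file):
// a lowering routine reports the problem and then substitutes something safe,
// e.g. roughly:
//   errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
//   // ...then fall back to an integer register or UNDEF so isel can proceed.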
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  X86ScalarSSEf64 = Subtarget.hasSSE2();
  X86ScalarSSEf32 = Subtarget.hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
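  // For example, a v4i32 SETCC yields a per-lane mask such as
  // <-1, 0, -1, 0> (exactly what PCMPGTD materializes), while a scalar
  // SETCC yields an i8 that is strictly 0 or 1.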
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
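  // Conceptually (illustrative pseudo-code, not what is emitted literally),
  // the 64/32 bypass wraps each 64-bit divide in a cheap range check:
  //   if (((a | b) >> 32) == 0)
  //     q = (uint32_t)a / (uint32_t)b;   // fast 32-bit DIV path
  //   else
  //     q = a / b;                       // full 64-bit DIV path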
  if (Subtarget.isTargetWindowsMSVC() ||
      Subtarget.isTargetWindowsItanium()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }
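  // With these overrides, e.g. a 64-bit sdiv on 32-bit MSVC-style targets
  // becomes a call to _alldiv under the stdcall convention (callee pops its
  // arguments), instead of the default __divdi3-style libcall.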
  if (Subtarget.isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget.isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so the AtomicExpandPass will expand it so we don't need
  // cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024 bits.
  if (!Subtarget.hasCmpxchg8b())
    setMaxAtomicSizeInBitsSupported(32);
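  // E.g. with this cap, an i64 atomic load on a 486 is turned by
  // AtomicExpandPass into an __atomic_* libcall rather than requiring a
  // CMPXCHG8B the hardware does not have.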
  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);
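  // Expanding a truncating store such as i64 -> i32 simply splits it into an
  // explicit TRUNCATE node followed by an ordinary i32 store, which then
  // matches a plain 32-bit MOV.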
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
  // Integer absolute.
  if (Subtarget.hasCMov()) {
    setOperationAction(ISD::ABS, MVT::i16, Custom);
    setOperationAction(ISD::ABS, MVT::i32, Custom);
  }
  setOperationAction(ISD::ABS, MVT::i64, Custom);

  // Funnel shifts.
  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    setOperationAction(ShiftOp, MVT::i16, Custom);
    setOperationAction(ShiftOp, MVT::i32, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp, MVT::i64, Custom);
  }
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (!Subtarget.useSoftFloat()) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  } else
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!Subtarget.useSoftFloat()) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  }
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (!Subtarget.useSoftFloat()) {
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (!Subtarget.useSoftFloat()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }
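  // Illustrative: x86's DIV/IDIV computes both results at once (quotient in
  // *AX, remainder in *DX), so IR like
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // can be CSE'd into one SDIVREM node and hence a single IDIV.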
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }
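  // The split exists because BSF leaves its destination undefined when the
  // source is zero, whereas BMI's TZCNT defines that case (returning the
  // operand width). So without BMI, plain CTTZ needs a Custom zero check,
  // but CTTZ_ZERO_UNDEF can still map straight to BSF.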
  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
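  // In effect, without F16C, f16 is a storage-only type here: an f16 load is
  // extended through FP16_TO_FP (typically a __gnu_h2f_ieee-style libcall)
  // and a store goes through FP_TO_FP16; no f16 arithmetic is ever formed.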
  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    else
      setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  }
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol, VT, Custom);
    setOperationAction(ISD::BlockAddress, VT, Custom);
  }
  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }
  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }
  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF, VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }
  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }
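    // Net effect: the only f80 immediates treated as legal are +/-0.0 and
    // +/-1.0, since x87 can synthesize them with FLD0/FLD1 plus FCHS;
    // anything else is loaded from the constant pool.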
    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
    setOperationAction(ISD::LROUND, MVT::f80, Expand);
    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
    setOperationAction(ISD::LRINT, MVT::f80, Expand);
    setOperationAction(ISD::LLRINT, MVT::f80, Expand);
  }
  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD, MVT::f128, Custom);
    setOperationAction(ISD::FSUB, MVT::f128, Custom);
    setOperationAction(ISD::FDIV, MVT::f128, Custom);
    setOperationAction(ISD::FMUL, MVT::f128, Custom);
    setOperationAction(ISD::FMA, MVT::f128, Expand);

    setOperationAction(ISD::FABS, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN, MVT::f128, Expand);
    setOperationAction(ISD::FCOS, MVT::f128, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
    setOperationAction(ISD::FSQRT, MVT::f128, Expand);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }
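  // E.g. an f128 FADD still ends up as a __addtf3-style libcall (the Custom
  // hook mainly keeps values in XMM registers); only move/logic operations
  // like FABS/FNEG/FCOPYSIGN get genuine inline sign-bit-mask lowerings.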
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);

    setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
    setOperationAction(ISD::STORE, MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL, MVT::v2i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i8, Custom);
    setOperationAction(ISD::MUL, MVT::v8i8, Custom);

    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }

    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
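    // The Legal i8/i16 cases map directly onto SSE2's saturating-arithmetic
    // instructions (PADDUSB/PADDSB, PSUBUSW/PSUBSW, etc.); i32/i64 have no
    // such instructions, so those cases are synthesized in custom lowering
    // from compare/select-style sequences.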
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::ABS, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);

    // Custom legalize these to avoid over promotion or custom promotion.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);

    // By marking FP_TO_SINT v8i16 as Custom, we trick type legalization into
    // promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
    // split again based on the input type, this will cause an AssertSExt i16 to
    // be emitted instead of an AssertZExt. This will allow packssdw followed by
    // packuswb to be used to truncate to v8i8. This is necessary since packusdw
    // isn't available until sse4.1.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // store.
    setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
    setOperationAction(ISD::STORE, MVT::v8i8, Custom);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v8i16, Custom);

    // With AVX512, expanding (and promoting the shifts) is better.
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);

    // These might be better off as horizontal vector ops.
    setOperationAction(ISD::ADD, MVT::i16, Custom);
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i16, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
    }

    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }

    // i8 vectors are custom because the source register and source
    // memory operand types are not the same width.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ROTL, VT, Custom);

    // XOP can efficiently perform BITREVERSE with VPPERM.
    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
    bool HasInt256 = Subtarget.hasInt256();

    addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);

    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::FFLOOR, VT, Legal);
      setOperationAction(ISD::FCEIL, VT, Legal);
      setOperationAction(ISD::FTRUNC, VT, Legal);
      setOperationAction(ISD::FRINT, VT, Legal);
      setOperationAction(ISD::FNEARBYINT, VT, Legal);
      setOperationAction(ISD::FNEG, VT, Custom);
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
    }

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);

    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Custom);

    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);

    // In the customized shift lowering, the legal v8i32/v4i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    // These types need custom splitting if their input is a 128-bit vector.
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);

    setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v16i16, Custom);

    // With BWI, expanding (and promoting the shifts) is better.
    if (!Subtarget.hasBWI())
      setOperationAction(ISD::ROTL, MVT::v32i8, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTLZ, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }
    if (Subtarget.hasAnyFMA()) {
      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
                       MVT::v2f64, MVT::v4f64 })
        setOperationAction(ISD::FMA, VT, Legal);
    }

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
    }

    setOperationAction(ISD::MUL, MVT::v4i64, Custom);
    setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL, MVT::v32i8, Custom);

    setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v32i8, Custom);

    setOperationAction(ISD::ABS, MVT::v4i64, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
    setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i64, Custom);

    setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
      setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
    }

    for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
    }

    if (HasInt256) {
      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
      // when we have a 256bit-wide blend with immediate.
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);

      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
      for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
        setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
      }
    }
    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
      setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
      setOperationAction(ISD::MSTORE, VT, Legal);
    }

    // Extract subvector is special because the value type
    // (result) is 128-bit but the source is 256-bit wide.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
    }

    // Custom lower several nodes for 256-bit types.
    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
                    MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
    }

    if (HasInt256) {
      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      // Custom legalize 2x32 to get a little better code.
      setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
      setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);

      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                       MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
        setOperationAction(ISD::MGATHER, VT, Custom);
    }
  }
  // This block controls legalization of the mask vector sizes that are
  // available with AVX512. 512-bit vectors are in a separate block controlled
  // by useAVX512Regs.
  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
    addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
    addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
    addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
    addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
    addRegisterClass(MVT::v16i1, &X86::VK16RegClass);

    setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);

    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);

    // There is no byte sized k-register load or store without AVX512DQ.
    if (!Subtarget.hasDQI()) {
      setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
      setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
      setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
      setOperationAction(ISD::LOAD, MVT::v8i1, Custom);

      setOperationAction(ISD::STORE, MVT::v1i1, Custom);
      setOperationAction(ISD::STORE, MVT::v2i1, Custom);
      setOperationAction(ISD::STORE, MVT::v4i1, Custom);
      setOperationAction(ISD::STORE, MVT::v8i1, Custom);
    }
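    // Without AVX512DQ there is no KMOVB, i.e. no 8-bit mask-register memory
    // access. The Custom handlers route these loads/stores through other
    // legal operations (sketch: a byte-sized integer access plus a GPR<->mask
    // move) instead of an over-wide 16-bit KMOVW memory access.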
    // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
    }

    for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
      setOperationAction(ISD::ADD, VT, Custom);
      setOperationAction(ISD::SUB, VT, Custom);
      setOperationAction(ISD::MUL, VT, Custom);
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::UADDSAT, VT, Custom);
      setOperationAction(ISD::SADDSAT, VT, Custom);
      setOperationAction(ISD::USUBSAT, VT, Custom);
      setOperationAction(ISD::SSUBSAT, VT, Custom);

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Expand);
    }

    for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
  }
1354 // This block controls legalization for 512-bit operations with 32/64 bit
1355 // elements. 512-bits can be disabled based on prefer-vector-width and
1356 // required-vector-width function attributes.
1357 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1358 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1359 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1360 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1361 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1363 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1364 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1365 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1366 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1367 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1368 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
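// E.g. these correspond to the AVX512 extending loads: a sextload of v16i8
// to v16i32 can be a single VPMOVSXBD from memory, and a zextload of v8i32
// to v8i64 a single VPMOVZXDQ.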
1371 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1372 setOperationAction(ISD::FNEG, VT, Custom);
1373 setOperationAction(ISD::FABS, VT, Custom);
1374 setOperationAction(ISD::FMA, VT, Legal);
1375 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1378 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1379 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
1380 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
1381 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
1382 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1383 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
1384 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
1385 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
1386 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1387 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1389 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f32, Custom);
1391 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1392 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1393 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1394 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1395 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
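// E.g. these map to the AVX512 down-converting moves: a truncstore of
// v8i64 to v8i8 can be a single VPMOVQB, and v16i32 to v16i8 a VPMOVDB.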
1397 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1398 // to 512-bit rather than use the AVX2 instructions so that we can use
1399 // k-registers.
1400 if (!Subtarget.hasVLX()) {
1401 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1402 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1403 setOperationAction(ISD::MLOAD, VT, Custom);
1404 setOperationAction(ISD::MSTORE, VT, Custom);
1408 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1409 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1410 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1411 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1412 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1413 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1414 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1415 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1417 // Need to custom widen this if we don't have AVX512BW.
1418 setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom);
1419 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom);
1420 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom);
1422 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1423 setOperationAction(ISD::FFLOOR, VT, Legal);
1424 setOperationAction(ISD::FCEIL, VT, Legal);
1425 setOperationAction(ISD::FTRUNC, VT, Legal);
1426 setOperationAction(ISD::FRINT, VT, Legal);
1427 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1429 setOperationAction(ISD::SELECT, VT, Custom);
1432 // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1433 for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1434 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1435 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1438 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1439 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1440 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1441 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1443 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1444 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1446 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1447 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1449 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1450 setOperationAction(ISD::SMAX, VT, Legal);
1451 setOperationAction(ISD::UMAX, VT, Legal);
1452 setOperationAction(ISD::SMIN, VT, Legal);
1453 setOperationAction(ISD::UMIN, VT, Legal);
1454 setOperationAction(ISD::ABS, VT, Legal);
1455 setOperationAction(ISD::SRL, VT, Custom);
1456 setOperationAction(ISD::SHL, VT, Custom);
1457 setOperationAction(ISD::SRA, VT, Custom);
1458 setOperationAction(ISD::CTPOP, VT, Custom);
1459 setOperationAction(ISD::ROTL, VT, Custom);
1460 setOperationAction(ISD::ROTR, VT, Custom);
1461 setOperationAction(ISD::SETCC, VT, Custom);
1462 setOperationAction(ISD::SELECT, VT, Custom);
1464 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1465 // setcc all the way to isel and prefer SETGT in some isel patterns.
1466 setCondCodeAction(ISD::SETLT, VT, Custom);
1467 setCondCodeAction(ISD::SETLE, VT, Custom);
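// E.g. a vector SETLT is handled by swapping the operands and matching the
// SETGT form instead: setcc X, Y, setlt -> setcc Y, X, setgt.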
1470 if (Subtarget.hasDQI()) {
1471 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1472 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1473 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1474 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1476 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1479 if (Subtarget.hasCDI()) {
1480 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1481 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1482 setOperationAction(ISD::CTLZ, VT, Legal);
1484 } // Subtarget.hasCDI()
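// E.g. with CDI a ctlz of v16i32 can be selected to a single VPLZCNTD, and
// of v8i64 to a single VPLZCNTQ.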
1486 if (Subtarget.hasVPOPCNTDQ()) {
1487 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1488 setOperationAction(ISD::CTPOP, VT, Legal);
1491 // Extract subvector is special because the value type
1492 // (result) is 256-bit but the source is 512-bit wide.
1493 // 128-bit was made Legal under AVX1.
1494 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1495 MVT::v8f32, MVT::v4f64 })
1496 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1498 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1499 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1500 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1501 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1502 setOperationAction(ISD::VSELECT, VT, Custom);
1503 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1504 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1505 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1506 setOperationAction(ISD::MLOAD, VT, Legal);
1507 setOperationAction(ISD::MSTORE, VT, Legal);
1508 setOperationAction(ISD::MGATHER, VT, Custom);
1509 setOperationAction(ISD::MSCATTER, VT, Custom);
1511 if (!Subtarget.hasBWI()) {
1512 // Need to custom split v32i16/v64i8 bitcasts.
1513 setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1514 setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
1516 // Better to split these into two 256-bit ops.
1517 setOperationAction(ISD::BITREVERSE, MVT::v8i64, Custom);
1518 setOperationAction(ISD::BITREVERSE, MVT::v16i32, Custom);
1521 if (Subtarget.hasVBMI2()) {
1522 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1523 setOperationAction(ISD::FSHL, VT, Custom);
1524 setOperationAction(ISD::FSHR, VT, Custom);
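// E.g. with VBMI2 these funnel shifts can be selected to the concat-shift
// instructions, roughly VPSHLDQ/VPSHRDQ for immediate amounts and
// VPSHLDVQ/VPSHRDVQ for variable ones.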
1529 // This block controls legalization for operations that don't have
1530 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1531 // 128/256-bit vector types.
1532 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1533 // These operations are handled on non-VLX by artificially widening in
1534 // isel patterns.
1535 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1537 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1538 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1539 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1540 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1541 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1543 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1544 setOperationAction(ISD::SMAX, VT, Legal);
1545 setOperationAction(ISD::UMAX, VT, Legal);
1546 setOperationAction(ISD::SMIN, VT, Legal);
1547 setOperationAction(ISD::UMIN, VT, Legal);
1548 setOperationAction(ISD::ABS, VT, Legal);
1551 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1552 setOperationAction(ISD::ROTL, VT, Custom);
1553 setOperationAction(ISD::ROTR, VT, Custom);
1556 // Custom legalize 2x32 to get a little better code.
1557 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1558 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1560 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1561 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1562 setOperationAction(ISD::MSCATTER, VT, Custom);
1564 if (Subtarget.hasDQI()) {
1565 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1566 setOperationAction(ISD::SINT_TO_FP, VT, Legal);
1567 setOperationAction(ISD::UINT_TO_FP, VT, Legal);
1568 setOperationAction(ISD::FP_TO_SINT, VT, Legal);
1569 setOperationAction(ISD::FP_TO_UINT, VT, Legal);
1571 setOperationAction(ISD::MUL, VT, Legal);
1575 if (Subtarget.hasCDI()) {
1576 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1577 setOperationAction(ISD::CTLZ, VT, Legal);
1579 } // Subtarget.hasCDI()
1581 if (Subtarget.hasVPOPCNTDQ()) {
1582 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1583 setOperationAction(ISD::CTPOP, VT, Legal);
1587 // This block controls legalization of v32i1/v64i1 which are available with
1588 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1589 // useBWIRegs.
1590 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1591 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1592 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1594 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1595 setOperationAction(ISD::ADD, VT, Custom);
1596 setOperationAction(ISD::SUB, VT, Custom);
1597 setOperationAction(ISD::MUL, VT, Custom);
1598 setOperationAction(ISD::VSELECT, VT, Expand);
1599 setOperationAction(ISD::UADDSAT, VT, Custom);
1600 setOperationAction(ISD::SADDSAT, VT, Custom);
1601 setOperationAction(ISD::USUBSAT, VT, Custom);
1602 setOperationAction(ISD::SSUBSAT, VT, Custom);
1604 setOperationAction(ISD::TRUNCATE, VT, Custom);
1605 setOperationAction(ISD::SETCC, VT, Custom);
1606 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1607 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1608 setOperationAction(ISD::SELECT, VT, Custom);
1609 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1610 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1613 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1614 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1615 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1616 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1617 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1618 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1620 // Extends from v32i1 masks to 256-bit vectors.
1621 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1622 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1623 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
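// E.g. a sign_extend of a v32i1 mask to v32i8 can be selected (given VLX
// for the 256-bit form) to a single VPMOVM2B; the zero_extend form is
// roughly the same plus masking the lanes down to 0/1.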
1626 // This block controls legalization for v32i16 and v64i8. 512-bits can be
1627 // disabled based on prefer-vector-width and required-vector-width function
1628 // attributes.
1629 if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1630 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1631 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1633 // Extends from v64i1 masks to 512-bit vectors.
1634 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1635 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1636 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1638 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1639 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1640 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1641 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1642 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1643 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1644 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1645 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1646 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
1647 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
1648 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1649 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1650 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1651 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1652 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1653 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1654 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1655 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1656 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1657 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1658 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1659 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1660 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1662 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1663 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1665 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1667 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1668 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1669 setOperationAction(ISD::VSELECT, VT, Custom);
1670 setOperationAction(ISD::ABS, VT, Legal);
1671 setOperationAction(ISD::SRL, VT, Custom);
1672 setOperationAction(ISD::SHL, VT, Custom);
1673 setOperationAction(ISD::SRA, VT, Custom);
1674 setOperationAction(ISD::MLOAD, VT, Legal);
1675 setOperationAction(ISD::MSTORE, VT, Legal);
1676 setOperationAction(ISD::CTPOP, VT, Custom);
1677 setOperationAction(ISD::CTLZ, VT, Custom);
1678 setOperationAction(ISD::SMAX, VT, Legal);
1679 setOperationAction(ISD::UMAX, VT, Legal);
1680 setOperationAction(ISD::SMIN, VT, Legal);
1681 setOperationAction(ISD::UMIN, VT, Legal);
1682 setOperationAction(ISD::SETCC, VT, Custom);
1683 setOperationAction(ISD::UADDSAT, VT, Legal);
1684 setOperationAction(ISD::SADDSAT, VT, Legal);
1685 setOperationAction(ISD::USUBSAT, VT, Legal);
1686 setOperationAction(ISD::SSUBSAT, VT, Legal);
1687 setOperationAction(ISD::SELECT, VT, Custom);
1689 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1690 // setcc all the way to isel and prefer SETGT in some isel patterns.
1691 setCondCodeAction(ISD::SETLT, VT, Custom);
1692 setCondCodeAction(ISD::SETLE, VT, Custom);
1695 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1696 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1699 if (Subtarget.hasBITALG()) {
1700 for (auto VT : { MVT::v64i8, MVT::v32i16 })
1701 setOperationAction(ISD::CTPOP, VT, Legal);
1704 if (Subtarget.hasVBMI2()) {
1705 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1706 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1710 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1711 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1712 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1713 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1716 // These operations are handled on non-VLX by artificially widening in
1717 // isel patterns.
1718 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1720 if (Subtarget.hasBITALG()) {
1721 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1722 setOperationAction(ISD::CTPOP, VT, Legal);
1726 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1727 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1728 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1729 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1730 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1731 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1733 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1734 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1735 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1736 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1737 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1739 if (Subtarget.hasDQI()) {
1740 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1741 // v2f32 UINT_TO_FP is already custom under SSE2.
1742 setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
1743 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1744 "Unexpected operation action!");
1745 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1746 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1747 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1750 if (Subtarget.hasBWI()) {
1751 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1752 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1755 if (Subtarget.hasVBMI2()) {
1756 // TODO: Make these legal even without VLX?
1757 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1758 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1759 setOperationAction(ISD::FSHL, VT, Custom);
1760 setOperationAction(ISD::FSHR, VT, Custom);
1764 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1765 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1766 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1769 // We want to custom lower some of our intrinsics.
1770 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1771 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1772 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1773 if (!Subtarget.is64Bit()) {
1774 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1777 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1778 // handle type legalization for these operations here.
1780 // FIXME: We really should do custom legalization for addition and
1781 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1782 // than generic legalization for 64-bit multiplication-with-overflow, though.
1783 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1784 if (VT == MVT::i64 && !Subtarget.is64Bit())
1785 continue;
1786 // Add/Sub/Mul with overflow operations are custom lowered.
1787 setOperationAction(ISD::SADDO, VT, Custom);
1788 setOperationAction(ISD::UADDO, VT, Custom);
1789 setOperationAction(ISD::SSUBO, VT, Custom);
1790 setOperationAction(ISD::USUBO, VT, Custom);
1791 setOperationAction(ISD::SMULO, VT, Custom);
1792 setOperationAction(ISD::UMULO, VT, Custom);
1794 // Support carry in as value rather than glue.
1795 setOperationAction(ISD::ADDCARRY, VT, Custom);
1796 setOperationAction(ISD::SUBCARRY, VT, Custom);
1797 setOperationAction(ISD::SETCCCARRY, VT, Custom);
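// E.g. an (uaddo X, Y) selects to an ADD whose carry lives in EFLAGS, and
// a following (addcarry A, B, C) becomes an ADC, with the carry threaded
// between the nodes as a value rather than as glue.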
1800 if (!Subtarget.is64Bit()) {
1801 // These libcalls are not available in 32-bit.
1802 setLibcallName(RTLIB::SHL_I128, nullptr);
1803 setLibcallName(RTLIB::SRL_I128, nullptr);
1804 setLibcallName(RTLIB::SRA_I128, nullptr);
1805 setLibcallName(RTLIB::MUL_I128, nullptr);
1808 // Combine sin / cos into _sincos_stret if it is available.
1809 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1810 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1811 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1812 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1815 if (Subtarget.isTargetWin64()) {
1816 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1817 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1818 setOperationAction(ISD::SREM, MVT::i128, Custom);
1819 setOperationAction(ISD::UREM, MVT::i128, Custom);
1820 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1821 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1824 // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1825 // is. We should promote the value to 64-bits to solve this.
1826 // This is what the CRT headers do - `fmodf` is an inline header
1827 // function casting to f64 and calling `fmod`.
1828 if (Subtarget.is32Bit() &&
1829 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1830 for (ISD::NodeType Op :
1831 {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
1832 ISD::FLOG10, ISD::FPOW, ISD::FSIN})
1833 if (isOperationExpand(Op, MVT::f32))
1834 setOperationAction(Op, MVT::f32, Promote);
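// E.g. a call to sinf(x) is then emitted roughly as (float)sin((double)x),
// matching what the inline CRT header function would have done.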
1836 // We have target-specific dag combine patterns for the following nodes:
1837 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1838 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1839 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1840 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1841 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1842 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1843 setTargetDAGCombine(ISD::BITCAST);
1844 setTargetDAGCombine(ISD::VSELECT);
1845 setTargetDAGCombine(ISD::SELECT);
1846 setTargetDAGCombine(ISD::SHL);
1847 setTargetDAGCombine(ISD::SRA);
1848 setTargetDAGCombine(ISD::SRL);
1849 setTargetDAGCombine(ISD::OR);
1850 setTargetDAGCombine(ISD::AND);
1851 setTargetDAGCombine(ISD::ADD);
1852 setTargetDAGCombine(ISD::FADD);
1853 setTargetDAGCombine(ISD::FSUB);
1854 setTargetDAGCombine(ISD::FNEG);
1855 setTargetDAGCombine(ISD::FMA);
1856 setTargetDAGCombine(ISD::FMINNUM);
1857 setTargetDAGCombine(ISD::FMAXNUM);
1858 setTargetDAGCombine(ISD::SUB);
1859 setTargetDAGCombine(ISD::LOAD);
1860 setTargetDAGCombine(ISD::MLOAD);
1861 setTargetDAGCombine(ISD::STORE);
1862 setTargetDAGCombine(ISD::MSTORE);
1863 setTargetDAGCombine(ISD::TRUNCATE);
1864 setTargetDAGCombine(ISD::ZERO_EXTEND);
1865 setTargetDAGCombine(ISD::ANY_EXTEND);
1866 setTargetDAGCombine(ISD::SIGN_EXTEND);
1867 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1868 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
1869 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1870 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1871 setTargetDAGCombine(ISD::SINT_TO_FP);
1872 setTargetDAGCombine(ISD::UINT_TO_FP);
1873 setTargetDAGCombine(ISD::SETCC);
1874 setTargetDAGCombine(ISD::MUL);
1875 setTargetDAGCombine(ISD::XOR);
1876 setTargetDAGCombine(ISD::MSCATTER);
1877 setTargetDAGCombine(ISD::MGATHER);
1879 computeRegisterProperties(Subtarget.getRegisterInfo());
1881 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1882 MaxStoresPerMemsetOptSize = 8;
1883 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1884 MaxStoresPerMemcpyOptSize = 4;
1885 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1886 MaxStoresPerMemmoveOptSize = 4;
1888 // TODO: These control memcmp expansion in CGP and could be raised higher, but
1889 // that needs to be benchmarked and balanced with the potential use of vector
1890 // load/store types (PR33329, PR33914).
1891 MaxLoadsPerMemcmp = 2;
1892 MaxLoadsPerMemcmpOptSize = 2;
1894 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
1895 setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
1897 // An out-of-order CPU can speculatively execute past a predictable branch,
1898 // but a conditional move could be stalled by an expensive earlier operation.
1899 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
1900 EnableExtLdPromotion = true;
1901 setPrefFunctionAlignment(Align(16));
1903 verifyIntrinsicTables();
1906 // This has so far only been implemented for 64-bit MachO.
1907 bool X86TargetLowering::useLoadStackGuardNode() const {
1908 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
1911 bool X86TargetLowering::useStackGuardXorFP() const {
1912 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
1913 return Subtarget.getTargetTriple().isOSMSVCRT();
1916 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
1917 const SDLoc &DL) const {
1918 EVT PtrTy = getPointerTy(DAG.getDataLayout());
1919 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
1920 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
1921 return SDValue(Node, 0);
1924 TargetLoweringBase::LegalizeTypeAction
1925 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
1926 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1927 return TypeSplitVector;
1929 if (VT.getVectorNumElements() != 1 &&
1930 VT.getVectorElementType() != MVT::i1)
1931 return TypeWidenVector;
1933 return TargetLoweringBase::getPreferredVectorAction(VT);
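// E.g. v2i32 is widened to the legal v4i32, while a v32i1 mask on an
// AVX512F-only (no BWI) target is split into two v16i1 halves that fit the
// available k-register patterns.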
1936 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1937 CallingConv::ID CC,
1938 MVT VT) const {
1939 // v32i1 vectors should be promoted to v32i8 to match avx2.
1940 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1941 return MVT::v32i8;
1942 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
1943 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
1944 Subtarget.hasAVX512() &&
1945 (!isPowerOf2_32(VT.getVectorNumElements()) ||
1946 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
1947 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
1948 return MVT::i8;
1949 // FIXME: Should we just make these types legal and custom split operations?
1950 if ((VT == MVT::v32i16 || VT == MVT::v64i8) &&
1951 Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
1952 return MVT::v16i32;
1953 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1956 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1957 CallingConv::ID CC,
1958 MVT VT) const {
1959 // v32i1 vectors should be promoted to v32i8 to match avx2.
1960 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1961 return 1;
1962 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
1963 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
1964 Subtarget.hasAVX512() &&
1965 (!isPowerOf2_32(VT.getVectorNumElements()) ||
1966 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
1967 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
1968 return VT.getVectorNumElements();
1969 // FIXME: Should we just make these types legal and custom split operations?
1970 if ((VT == MVT::v32i16 || VT == MVT::v64i8) &&
1971 Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
1972 return 1;
1973 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1976 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
1977 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
1978 unsigned &NumIntermediates, MVT &RegisterVT) const {
1979 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
1980 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
1981 Subtarget.hasAVX512() &&
1982 (!isPowerOf2_32(VT.getVectorNumElements()) ||
1983 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
1984 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
1985 RegisterVT = MVT::i8;
1986 IntermediateVT = MVT::i1;
1987 NumIntermediates = VT.getVectorNumElements();
1988 return NumIntermediates;
1991 return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
1992 NumIntermediates, RegisterVT);
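// E.g. an odd mask type like v3i1 is decomposed here into three i1
// intermediates, each carried in an i8 register, mirroring the scalar
// convention AVX2 targets use.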
1995 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
1996 LLVMContext& Context,
1997 EVT VT) const {
1998 if (!VT.isVector())
1999 return MVT::i8;
2001 if (Subtarget.hasAVX512()) {
2002 const unsigned NumElts = VT.getVectorNumElements();
2004 // Figure out what this type will be legalized to.
2005 EVT LegalVT = VT;
2006 while (getTypeAction(Context, LegalVT) != TypeLegal)
2007 LegalVT = getTypeToTransformTo(Context, LegalVT);
2009 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2010 if (LegalVT.getSimpleVT().is512BitVector())
2011 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2013 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2014 // If we legalized to less than a 512-bit vector, then we will use a vXi1
2015 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2016 // vXi16/vXi8.
2017 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2018 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2019 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2023 return VT.changeVectorElementTypeToInteger();
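// E.g. a setcc on v16i32 produces v16i1 on any AVX512 target, while a
// setcc on v8i16 only produces v8i1 once BWI (plus VLX for the narrow
// width) is available; otherwise it stays v8i16.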
2026 /// Helper for getByValTypeAlignment to determine
2027 /// the desired ByVal argument alignment.
2028 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
2029 if (MaxAlign == 16)
2030 return;
2031 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2032 if (VTy->getBitWidth() == 128)
2033 MaxAlign = 16;
2034 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2035 unsigned EltAlign = 0;
2036 getMaxByValAlign(ATy->getElementType(), EltAlign);
2037 if (EltAlign > MaxAlign)
2038 MaxAlign = EltAlign;
2039 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2040 for (auto *EltTy : STy->elements()) {
2041 unsigned EltAlign = 0;
2042 getMaxByValAlign(EltTy, EltAlign);
2043 if (EltAlign > MaxAlign)
2044 MaxAlign = EltAlign;
2051 /// Return the desired alignment for ByVal aggregate
2052 /// function arguments in the caller parameter area. For X86, aggregates
2053 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2054 /// are at 4-byte boundaries.
2055 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2056 const DataLayout &DL) const {
2057 if (Subtarget.is64Bit()) {
2058 // Max of 8 and alignment of type.
2059 unsigned TyAlign = DL.getABITypeAlignment(Ty);
2060 if (TyAlign > 8)
2061 return TyAlign;
2062 return 8;
2063 }
2065 unsigned Align = 4;
2066 if (Subtarget.hasSSE1())
2067 getMaxByValAlign(Ty, Align);
2068 return Align;
2069 }
2071 /// Returns the target specific optimal type for load
2072 /// and store operations as a result of memset, memcpy, and memmove
2073 /// lowering. If DstAlign is zero, that means the destination's
2074 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
2075 /// means there isn't a need to check it against alignment requirement,
2076 /// probably because the source does not need to be loaded. If 'IsMemset' is
2077 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2078 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2079 /// source is constant so it does not need to be loaded.
2080 /// It returns EVT::Other if the type should be determined using generic
2081 /// target-independent logic.
2082 /// For vector ops we check that the overall size isn't larger than our
2083 /// preferred vector width.
2084 EVT X86TargetLowering::getOptimalMemOpType(
2085 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2086 bool ZeroMemset, bool MemcpyStrSrc,
2087 const AttributeList &FuncAttributes) const {
2088 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2089 if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2090 ((DstAlign == 0 || DstAlign >= 16) &&
2091 (SrcAlign == 0 || SrcAlign >= 16)))) {
2092 // FIXME: Check if unaligned 64-byte accesses are slow.
2093 if (Size >= 64 && Subtarget.hasAVX512() &&
2094 (Subtarget.getPreferVectorWidth() >= 512)) {
2095 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2096 }
2097 // FIXME: Check if unaligned 32-byte accesses are slow.
2098 if (Size >= 32 && Subtarget.hasAVX() &&
2099 (Subtarget.getPreferVectorWidth() >= 256)) {
2100 // Although this isn't a well-supported type for AVX1, we'll let
2101 // legalization and shuffle lowering produce the optimal codegen. If we
2102 // choose an optimal type with a vector element larger than a byte,
2103 // getMemsetStores() may create an intermediate splat (using an integer
2104 // multiply) before we splat as a vector.
2105 return MVT::v32i8;
2106 }
2107 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2108 return MVT::v4i32;
2109 // TODO: Can SSE1 handle a byte vector?
2110 // If we have SSE1 registers we should be able to use them.
2111 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2112 (Subtarget.getPreferVectorWidth() >= 128))
2113 return MVT::v4f32;
2114 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2115 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2116 // Do not use f64 to lower memcpy if source is string constant. It's
2117 // better to use i32 to avoid the loads.
2118 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2119 // The gymnastics of splatting a byte value into an XMM register and then
2120 // only using 8-byte stores (because this is a CPU with slow unaligned
2121 // 16-byte accesses) makes that a loser.
2122 return MVT::f64;
2123 }
2124 }
2125 // This is a compromise. If we reach here, unaligned accesses may be slow on
2126 // this target. However, creating smaller, aligned accesses could be even
2127 // slower and would certainly be a lot more code.
2128 if (Subtarget.is64Bit() && Size >= 8)
2129 return MVT::i64;
2130 return MVT::i32;
2131 }
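// E.g. a 64-byte memcpy on an AVX512BW target with 512-bit vectors enabled
// expands to a single v64i8 load/store pair, while the same copy on a
// 32-bit SSE2 target with slow unaligned accesses may fall back to f64 or
// i32 chunks.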
2133 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2134 if (VT == MVT::f32)
2135 return X86ScalarSSEf32;
2136 else if (VT == MVT::f64)
2137 return X86ScalarSSEf64;
2138 return true;
2139 }
2141 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2142 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2143 bool *Fast) const {
2144 if (Fast) {
2145 switch (VT.getSizeInBits()) {
2146 default:
2147 // 8-byte and under are always assumed to be fast.
2148 *Fast = true;
2149 break;
2150 case 128:
2151 *Fast = !Subtarget.isUnalignedMem16Slow();
2152 break;
2153 case 256:
2154 *Fast = !Subtarget.isUnalignedMem32Slow();
2155 break;
2156 // TODO: What about AVX-512 (512-bit) accesses?
2157 }
2158 }
2159 // NonTemporal vector memory ops must be aligned.
2160 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2161 // NT loads can only be vector aligned, so if it's less aligned than the
2162 // minimum vector size (which we can split the vector down to), we might as
2163 // well use a regular unaligned vector load.
2164 // We don't have any NT loads pre-SSE41.
2165 if (!!(Flags & MachineMemOperand::MOLoad))
2166 return (Align < 16 || !Subtarget.hasSSE41());
2167 return false;
2168 }
2169 // Misaligned accesses of any size are always allowed.
2170 return true;
2171 }
2173 /// Return the entry encoding for a jump table in the
2174 /// current function. The returned value is a member of the
2175 /// MachineJumpTableInfo::JTEntryKind enum.
2176 unsigned X86TargetLowering::getJumpTableEncoding() const {
2177 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2178 // symbol.
2179 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2180 return MachineJumpTableInfo::EK_Custom32;
2182 // Otherwise, use the normal jump table encoding heuristics.
2183 return TargetLowering::getJumpTableEncoding();
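// E.g. in the @GOTOFF mode each 32-bit table entry comes out roughly as
//   .long .LBB0_2@GOTOFF
// so the table itself contains no absolute addresses.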
2186 bool X86TargetLowering::useSoftFloat() const {
2187 return Subtarget.useSoftFloat();
2190 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2191 ArgListTy &Args) const {
2193 // Only relabel X86-32 for C / Stdcall CCs.
2194 if (Subtarget.is64Bit())
2196 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2198 unsigned ParamRegs = 0;
2199 if (auto *M = MF->getFunction().getParent())
2200 ParamRegs = M->getNumberRegisterParameters();
2202 // Mark the first N integer arguments as being passed in registers.
2203 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2204 Type *T = Args[Idx].Ty;
2205 if (T->isIntOrPtrTy())
2206 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2207 unsigned numRegs = 1;
2208 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2209 numRegs = 2;
2210 if (ParamRegs < numRegs)
2211 return;
2212 ParamRegs -= numRegs;
2213 Args[Idx].IsInReg = true;
2218 const MCExpr *
2219 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2220 const MachineBasicBlock *MBB,
2221 unsigned uid,MCContext &Ctx) const{
2222 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2223 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2224 // entries.
2225 return MCSymbolRefExpr::create(MBB->getSymbol(),
2226 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2229 /// Returns relocation base for the given PIC jumptable.
2230 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2231 SelectionDAG &DAG) const {
2232 if (!Subtarget.is64Bit())
2233 // This doesn't have SDLoc associated with it, but is not really the
2234 // same as a Register.
2235 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2236 getPointerTy(DAG.getDataLayout()));
2237 return Table;
2238 }
2240 /// This returns the relocation base for the given PIC jumptable,
2241 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2242 const MCExpr *X86TargetLowering::
2243 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2244 MCContext &Ctx) const {
2245 // X86-64 uses RIP relative addressing based on the jump table label.
2246 if (Subtarget.isPICStyleRIPRel())
2247 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2249 // Otherwise, the reference is relative to the PIC base.
2250 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2253 std::pair<const TargetRegisterClass *, uint8_t>
2254 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2255 MVT VT) const {
2256 const TargetRegisterClass *RRC = nullptr;
2257 uint8_t Cost = 1;
2258 switch (VT.SimpleTy) {
2259 default:
2260 return TargetLowering::findRepresentativeClass(TRI, VT);
2261 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2262 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2263 break;
2264 case MVT::x86mmx:
2265 RRC = &X86::VR64RegClass;
2266 break;
2267 case MVT::f32: case MVT::f64:
2268 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2269 case MVT::v4f32: case MVT::v2f64:
2270 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2271 case MVT::v8f32: case MVT::v4f64:
2272 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2273 case MVT::v16f32: case MVT::v8f64:
2274 RRC = &X86::VR128XRegClass;
2275 break;
2276 }
2277 return std::make_pair(RRC, Cost);
2280 unsigned X86TargetLowering::getAddressSpace() const {
2281 if (Subtarget.is64Bit())
2282 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2283 return 256;
2284 }
2286 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2287 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2288 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2291 static Constant* SegmentOffset(IRBuilder<> &IRB,
2292 unsigned Offset, unsigned AddressSpace) {
2293 return ConstantExpr::getIntToPtr(
2294 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2295 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
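// Address spaces 256 and 257 are how X86 models the GS and FS segments,
// so e.g. SegmentOffset(IRB, 0x28, 257) denotes the pointer %fs:0x28.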
2298 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2299 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2300 // tcbhead_t; use it instead of the usual global variable (see
2301 // sysdeps/{i386,x86_64}/nptl/tls.h)
2302 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2303 if (Subtarget.isTargetFuchsia()) {
2304 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2305 return SegmentOffset(IRB, 0x10, getAddressSpace());
2307 // %fs:0x28, unless we're using a Kernel code model, in which case
2308 // it's %gs:0x28. gs:0x14 on i386.
2309 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2310 return SegmentOffset(IRB, Offset, getAddressSpace());
2314 return TargetLowering::getIRStackGuard(IRB);
2317 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2318 // MSVC CRT provides functionalities for stack protection.
2319 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2320 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2321 // MSVC CRT has a global variable holding security cookie.
2322 M.getOrInsertGlobal("__security_cookie",
2323 Type::getInt8PtrTy(M.getContext()));
2325 // MSVC CRT has a function to validate security cookie.
2326 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2327 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2328 Type::getInt8PtrTy(M.getContext()));
2329 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2330 F->setCallingConv(CallingConv::X86_FastCall);
2331 F->addAttribute(1, Attribute::AttrKind::InReg);
2335 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2336 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2337 return;
2338 TargetLowering::insertSSPDeclarations(M);
2341 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2342 // MSVC CRT has a global variable holding security cookie.
2343 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2344 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2345 return M.getGlobalVariable("__security_cookie");
2347 return TargetLowering::getSDagStackGuard(M);
2350 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2351 // MSVC CRT has a function to validate security cookie.
2352 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2353 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2354 return M.getFunction("__security_check_cookie");
2356 return TargetLowering::getSSPStackGuardCheck(M);
2359 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2360 if (Subtarget.getTargetTriple().isOSContiki())
2361 return getDefaultSafeStackPointerLocation(IRB, false);
2363 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2364 // definition of TLS_SLOT_SAFESTACK in
2365 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2366 if (Subtarget.isTargetAndroid()) {
2367 // %fs:0x48, unless we're using a Kernel code model, in which case it's
2368 // %gs:0x48. %gs:0x24 on i386.
2369 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2370 return SegmentOffset(IRB, Offset, getAddressSpace());
2373 // Fuchsia is similar.
2374 if (Subtarget.isTargetFuchsia()) {
2375 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2376 return SegmentOffset(IRB, 0x18, getAddressSpace());
2379 return TargetLowering::getSafeStackPointerLocation(IRB);
2382 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2383 unsigned DestAS) const {
2384 assert(SrcAS != DestAS && "Expected different address spaces!");
2386 return SrcAS < 256 && DestAS < 256;
2389 //===----------------------------------------------------------------------===//
2390 // Return Value Calling Convention Implementation
2391 //===----------------------------------------------------------------------===//
2393 bool X86TargetLowering::CanLowerReturn(
2394 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2395 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2396 SmallVector<CCValAssign, 16> RVLocs;
2397 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2398 return CCInfo.CheckReturn(Outs, RetCC_X86);
2401 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2402 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2403 return ScratchRegs;
2404 }
2406 /// Lowers mask values (v*i1) to local register values.
2407 /// \returns DAG node after lowering to register type
2408 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2409 const SDLoc &Dl, SelectionDAG &DAG) {
2410 EVT ValVT = ValArg.getValueType();
2412 if (ValVT == MVT::v1i1)
2413 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2414 DAG.getIntPtrConstant(0, Dl));
2416 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2417 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2418 // Two stage lowering might be required
2419 // bitcast: v8i1 -> i8 / v16i1 -> i16
2420 // anyextend: i8 -> i32 / i16 -> i32
2421 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2422 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2423 if (ValLoc == MVT::i32)
2424 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2425 return ValToCopy;
2426 }
2428 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2429 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2430 // One stage lowering is required
2431 // bitcast: v32i1 -> i32 / v64i1 -> i64
2432 return DAG.getBitcast(ValLoc, ValArg);
2435 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
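// E.g. returning a v16i1 mask in a 32-bit location takes the two-stage
// path above: (any_extend i32 (bitcast i16 V)).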
2438 /// Breaks v64i1 value into two registers and adds the new node to the DAG
2439 static void Passv64i1ArgInRegs(
2440 const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2441 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, CCValAssign &VA,
2442 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2443 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2444 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2445 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2446 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2447 "The value should reside in two registers");
2449 // Before splitting the value we cast it to i64
2450 Arg = DAG.getBitcast(MVT::i64, Arg);
2452 // Splitting the value into two i32 types
2453 SDValue Lo, Hi;
2454 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2455 DAG.getConstant(0, Dl, MVT::i32));
2456 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2457 DAG.getConstant(1, Dl, MVT::i32));
2459 // Attach the two i32 types into corresponding registers
2460 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2461 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
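// E.g. on a 32-bit AVX512BW target a v64i1 argument is bitcast to i64 and
// its two i32 halves travel in whichever register pair VA and NextVA
// assign, such as EAX:EDX.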
2464 SDValue
2465 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2466 bool isVarArg,
2467 const SmallVectorImpl<ISD::OutputArg> &Outs,
2468 const SmallVectorImpl<SDValue> &OutVals,
2469 const SDLoc &dl, SelectionDAG &DAG) const {
2470 MachineFunction &MF = DAG.getMachineFunction();
2471 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2473 // In some cases we need to disable registers from the default CSR list.
2474 // For example, when they are used for argument passing.
2475 bool ShouldDisableCalleeSavedRegister =
2476 CallConv == CallingConv::X86_RegCall ||
2477 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2479 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2480 report_fatal_error("X86 interrupts may not return any value");
2482 SmallVector<CCValAssign, 16> RVLocs;
2483 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2484 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2486 SDValue Flag;
2487 SmallVector<SDValue, 6> RetOps;
2488 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2489 // Operand #1 = Bytes To Pop
2490 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2491 MVT::i32));
2493 // Copy the result values into the output registers.
2494 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2495 ++I, ++OutsIndex) {
2496 CCValAssign &VA = RVLocs[I];
2497 assert(VA.isRegLoc() && "Can only return in registers!");
2499 // Add the register to the CalleeSaveDisableRegs list.
2500 if (ShouldDisableCalleeSavedRegister)
2501 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2503 SDValue ValToCopy = OutVals[OutsIndex];
2504 EVT ValVT = ValToCopy.getValueType();
2506 // Promote values to the appropriate types.
2507 if (VA.getLocInfo() == CCValAssign::SExt)
2508 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2509 else if (VA.getLocInfo() == CCValAssign::ZExt)
2510 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2511 else if (VA.getLocInfo() == CCValAssign::AExt) {
2512 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2513 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2515 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2517 else if (VA.getLocInfo() == CCValAssign::BCvt)
2518 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2520 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2521 "Unexpected FP-extend for return value.");
2523 // If this is x86-64, and we disabled SSE, we can't return FP values,
2524 // or SSE or MMX vectors.
2525 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2526 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2527 (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
2528 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2529 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2530 } else if (ValVT == MVT::f64 &&
2531 (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
2532 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2533 // llvm-gcc has never done it right and no one has noticed, so this
2534 // should be OK for now.
2535 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2536 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2539 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2540 // the RET instruction and handled by the FP Stackifier.
2541 if (VA.getLocReg() == X86::FP0 ||
2542 VA.getLocReg() == X86::FP1) {
2543 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2544 // change the value to the FP stack register class.
2545 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2546 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2547 RetOps.push_back(ValToCopy);
2548 // Don't emit a copytoreg.
2552 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2553 // which is returned in RAX / RDX.
2554 if (Subtarget.is64Bit()) {
2555 if (ValVT == MVT::x86mmx) {
2556 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2557 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2558 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2559 ValToCopy);
2560 // If we don't have SSE2 available, convert to v4f32 so the generated
2561 // register is legal.
2562 if (!Subtarget.hasSSE2())
2563 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2568 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2570 if (VA.needsCustom()) {
2571 assert(VA.getValVT() == MVT::v64i1 &&
2572 "Currently the only custom case is when we split v64i1 to 2 regs");
2574 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RegsToPass, VA, RVLocs[++I],
2575 Subtarget);
2577 assert(2 == RegsToPass.size() &&
2578 "Expecting two registers after Pass64BitArgInRegs");
2580 // Add the second register to the CalleeSaveDisableRegs list.
2581 if (ShouldDisableCalleeSavedRegister)
2582 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2584 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2587 // Add nodes to the DAG and add the values into the RetOps list
2588 for (auto &Reg : RegsToPass) {
2589 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2590 Flag = Chain.getValue(1);
2591 RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2595 // Swift calling convention does not require us to copy the sret argument
2596 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2598 // All x86 ABIs require that for returning structs by value we copy
2599 // the sret argument into %rax/%eax (depending on ABI) for the return.
2600 // We saved the argument into a virtual register in the entry block,
2601 // so now we copy the value out and into %rax/%eax.
2603 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2604 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2605 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2606 // either case FuncInfo->setSRetReturnReg() will have been called.
2607 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2608 // When we have both sret and another return value, we should use the
2609 // original Chain stored in RetOps[0], instead of the current Chain updated
2610 // in the above loop. If we only have sret, RetOps[0] equals Chain.
2612 // For the case of sret and another return value, we have
2613 // Chain_0 at the function entry
2614 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2615 // If we use Chain_1 in getCopyFromReg, we will have
2616 // Val = getCopyFromReg(Chain_1)
2617 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2619 // getCopyToReg(Chain_0) will be glued together with
2620 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2621 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2622 // Data dependency from Unit B to Unit A due to usage of Val in
2623 // getCopyToReg(Chain_1, Val)
2624 // Chain dependency from Unit A to Unit B
2626 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2627 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2628 getPointerTy(MF.getDataLayout()));
2630 unsigned RetValReg
2631 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2632 X86::RAX : X86::EAX;
2633 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2634 Flag = Chain.getValue(1);
2636 // RAX/EAX now acts like a return value.
2637 RetOps.push_back(
2638 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2640 // Add the returned register to the CalleeSaveDisableRegs list.
2641 if (ShouldDisableCalleeSavedRegister)
2642 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2645 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2646 const MCPhysReg *I =
2647 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2648 if (I) {
2649 for (; *I; ++I) {
2650 if (X86::GR64RegClass.contains(*I))
2651 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2652 else
2653 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2654 }
2655 }
2657 RetOps[0] = Chain; // Update chain.
2659 // Add the flag if we have it.
2660 if (Flag.getNode())
2661 RetOps.push_back(Flag);
2663 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2664 if (CallConv == CallingConv::X86_INTR)
2665 opcode = X86ISD::IRET;
2666 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2669 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2670 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2671 return false;
2673 SDValue TCChain = Chain;
2674 SDNode *Copy = *N->use_begin();
2675 if (Copy->getOpcode() == ISD::CopyToReg) {
2676 // If the copy has a glue operand, we conservatively assume it isn't safe to
2677 // perform a tail call.
2678 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2679 return false;
2680 TCChain = Copy->getOperand(0);
2681 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2682 return false;
2684 bool HasRet = false;
2685 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2686 UI != UE; ++UI) {
2687 if (UI->getOpcode() != X86ISD::RET_FLAG)
2688 return false;
2689 // If we are returning more than one value, we can definitely
2690 // not make a tail call; see PR19530.
2691 if (UI->getNumOperands() > 4)
2692 return false;
2693 if (UI->getNumOperands() == 4 &&
2694 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2695 return false;
2696 HasRet = true;
2697 }
2699 if (!HasRet)
2700 return false;
2702 Chain = TCChain;
2703 return true;
2704 }
2706 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2707 ISD::NodeType ExtendKind) const {
2708 MVT ReturnMVT = MVT::i32;
2710 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2711 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2712 // The ABI does not require i1, i8 or i16 to be extended.
2714 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2715 // always extending i8/i16 return values, so keep doing that for now.
2717 ReturnMVT = MVT::i8;
2720 EVT MinVT = getRegisterType(Context, ReturnMVT);
2721 return VT.bitsLT(MinVT) ? MinVT : VT;
2724 /// Reads two 32 bit registers and creates a 64 bit mask value.
2725 /// \param VA The current 32 bit value that needs to be assigned.
2726 /// \param NextVA The next 32 bit value that needs to be assigned.
2727 /// \param Root The parent DAG node.
2728 /// \param [in,out] InFlag Represents SDValue in the parent DAG node for
2729 /// glue purposes. In case the DAG is already using a
2730 /// physical register instead of a virtual one, we should
2731 /// glue our new SDValue to the InFlag SDValue.
2732 /// \return a new SDValue of size 64 bits.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
                                SDValue &Root, SelectionDAG &DAG,
                                const SDLoc &Dl, const X86Subtarget &Subtarget,
                                SDValue *InFlag = nullptr) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.getValVT() == MVT::v64i1 &&
         "Expecting first location of 64 bit width type");
  assert(NextVA.getValVT() == VA.getValVT() &&
         "The locations should have the same type");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The values should reside in two registers");

  SDValue Lo, Hi;
  SDValue ArgValueLo, ArgValueHi;

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterClass *RC = &X86::GR32RegClass;

  // Read a 32 bit value from the registers.
  if (nullptr == InFlag) {
    // When no physical register is present,
    // create an intermediate virtual register.
    unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
    ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
  } else {
    // When a physical register is available read the value from it and glue
    // the reads together.
    ArgValueLo =
        DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
    *InFlag = ArgValueLo.getValue(2);
    ArgValueHi =
        DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
    *InFlag = ArgValueHi.getValue(2);
  }

  // Convert the i32 low half into a v32i1 mask.
  Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);

  // Convert the i32 high half into a v32i1 mask.
  Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);

  // Concatenate the two values together.
  return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
}
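
// Illustrative note (not from the original source): under the 32 bit regcall
// convention a v64i1 mask might, for example, arrive in two GR32 registers
// such as EAX (bits 0-31) and ECX (bits 32-63); the exact registers are
// whatever the calling convention assigned to VA and NextVA. The code above
// simply bitcasts each 32 bit half to v32i1 and concatenates the halves.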

/// The function will lower a register of various sizes (8/16/32/64)
/// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
/// \returns A DAG node containing the operand after lowering it to a mask
///          type.
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
                               const EVT &ValLoc, const SDLoc &Dl,
                               SelectionDAG &DAG) {
  SDValue ValReturned = ValArg;

  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);

  if (ValVT == MVT::v64i1) {
    // On a 32 bit target, this case is handled by getv64i1Argument.
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
    // On a 64 bit target, there is no need to truncate the value; only
    // bitcast it below.
  } else {
    MVT maskLen;
    switch (ValVT.getSimpleVT().SimpleTy) {
    case MVT::v8i1:  maskLen = MVT::i8;  break;
    case MVT::v16i1: maskLen = MVT::i16; break;
    case MVT::v32i1: maskLen = MVT::i32; break;
    default:
      llvm_unreachable("Expecting a vector of i1 types");
    }

    ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
  }
  return DAG.getBitcast(ValVT, ValReturned);
}
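
// Worked example (illustrative, not from the original source): a v16i1 mask
// returned in a 32 bit location (ValVT = v16i1, ValLoc = i32) is first
// truncated from i32 to i16 and then bitcast to v16i1. A v64i1 value in an
// i64 location skips the truncation and is bitcast directly.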

/// Lower the result values of a call into the appropriate copies out of
/// physical registers.
SDValue X86TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    uint32_t *RegMask) const {

  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool Is64Bit = Subtarget.is64Bit();
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++InsIndex) {
    CCValAssign &VA = RVLocs[I];
    EVT CopyVT = VA.getLocVT();

    // In some calling conventions we need to remove the used registers
    // from the register mask.
    if (RegMask) {
      for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
    }

    // If this is x86-64, and we disabled SSE, we can't return FP values.
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
        ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0; avoid hitting asserts.
    } else if (CopyVT == MVT::f64 &&
               (Is64Bit && !Subtarget.hasSSE2())) {
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0; avoid hitting asserts.
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from the fp stack reg to the xmm reg.
    bool RoundAfterCopy = false;
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {
      if (!Subtarget.hasX87())
        report_fatal_error("X87 register return with X87 disabled");
      CopyVT = MVT::f80;
      RoundAfterCopy = (CopyVT != VA.getLocVT());
    }

    SDValue Val;
    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      Val =
          getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
    } else {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
                  .getValue(1);
      Val = Chain.getValue(0);
      InFlag = Chain.getValue(2);
    }

    if (RoundAfterCopy)
      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1, dl));

    if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
      if (VA.getValVT().isVector() &&
          ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
           (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
        // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8.
        Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
      } else
        Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
    }

    InVals.push_back(Val);
  }

  return Chain;
}
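
// Illustrative note (not from the original source): when a callee returns a
// float on the x87 stack (FP0) but the caller keeps scalar floats in XMM
// registers, the value is copied out as f80 and the FP_ROUND above narrows it
// back to f32/f64; the IntPtrConstant(1) operand marks the truncation as
// value-preserving, so no rounding code needs to be emitted.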

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall is the standard calling convention for many Windows API routines.
//  It differs from the C calling convention only slightly: the callee cleans
//  up the stack instead of the caller, and symbols are decorated in a fancy
//  way :) It doesn't support any vector arguments.
//  For info on the fast calling convention see the Fast Calling Convention
//  (tail call) implementation LowerX86_32FastCCCallTo.

/// Determines whether a call uses struct return semantics.
enum StructReturnType {
  NotStructReturn,
  RegStructReturn,
  StackStructReturn
};
static StructReturnType
callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
  if (Outs.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg() || IsMCU)
    return RegStructReturn;
  return StackStructReturn;
}

/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
  if (Ins.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg() || IsMCU)
    return RegStructReturn;
  return StackStructReturn;
}
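
// Illustrative examples (not from the original source): a prototype like
// "void f(%struct.S* sret %out)" classifies as StackStructReturn on a typical
// 32 bit target (the hidden pointer lives on the stack), as RegStructReturn
// when the sret argument is also marked inreg, and as RegStructReturn on MCU
// targets, where the pointer is always passed in a register.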

/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);

  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile*/false, /*AlwaysInline=*/true,
                       /*isTailCall*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
          CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
          CC == CallingConv::HHVM || CC == CallingConv::Tail);
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  // C calling conventions:
  case CallingConv::C:
  case CallingConv::Win64:
  case CallingConv::X86_64_SysV:
  // Callee pop conventions:
  case CallingConv::X86_ThisCall:
  case CallingConv::X86_StdCall:
  case CallingConv::X86_VectorCall:
  case CallingConv::X86_FastCall:
  // Swift:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

/// Return true if the function is being made into a tail call target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
  return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) ||
         CC == CallingConv::Tail;
}
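
// Illustrative examples (not from the original source): a fastcc call
// compiled with -tailcallopt returns true here, as does any call using
// CallingConv::Tail regardless of that flag; a plain ccc call returns false
// and can at most become a sibcall.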

bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  auto Attr =
      CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
    return false;

  ImmutableCallSite CS(CI);
  CallingConv::ID CalleeCC = CS.getCallingConv();
  if (!mayTailCallThisCC(CalleeCC))
    return false;
  return true;
}

SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    const SDLoc &dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    MachineFrameInfo &MFI, unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = shouldGuaranteeTCO(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  EVT ValVT;
  MVT PtrVT = getPointerTy(DAG.getDataLayout());

  // If value is passed by pointer we have the address passed instead of the
  // value itself. No need to extend if the mask value and location share the
  // same bit width.
  bool ExtendedInMem =
      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
      VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
  if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis.
  // In case of tail call optimization, mark all arguments mutable, since they
  // could be overwritten by the lowering of arguments in case of a tail call.
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.

    // FIXME: For now, all byval parameter objects are marked as aliasing. This
    // can be improved with deeper analysis.
    int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
                                   /*isAliased=*/true);
    return DAG.getFrameIndex(FI, PtrVT);
  }

  // This is an argument in memory. We might be able to perform copy elision.
  // If the argument is passed directly in memory without any extension, then
  // we can perform copy elision. Large vector types, for example, may be
  // passed indirectly by pointer.
  if (Flags.isCopyElisionCandidate() &&
      VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
    EVT ArgVT = Ins[i].ArgVT;
    SDValue PartAddr;
    if (Ins[i].PartOffset == 0) {
      // If this is a one-part value or the first part of a multi-part value,
      // create a stack object for the entire argument value type and return a
      // load from our portion of it. This assumes that if the first part of an
      // argument is in memory, the rest will also be in memory.
      int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
                                     /*IsImmutable=*/false);
      PartAddr = DAG.getFrameIndex(FI, PtrVT);
      return DAG.getLoad(
          ValVT, dl, Chain, PartAddr,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    } else {
      // This is not the first piece of an argument in memory. See if there is
      // already a fixed stack object including this offset. If so, assume it
      // was created by the PartOffset == 0 branch above and create a load from
      // the appropriate offset into it.
      int64_t PartBegin = VA.getLocMemOffset();
      int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
      int FI = MFI.getObjectIndexBegin();
      for (; MFI.isFixedObjectIndex(FI); ++FI) {
        int64_t ObjBegin = MFI.getObjectOffset(FI);
        int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
          break;
      }
      if (MFI.isFixedObjectIndex(FI)) {
        SDValue Addr =
            DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
                        DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
        return DAG.getLoad(
            ValVT, dl, Chain, Addr,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
                                              Ins[i].PartOffset));
      }
    }
  }

  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), isImmutable);

  // Set SExt or ZExt flag.
  if (VA.getLocInfo() == CCValAssign::ZExt) {
    MFI.setObjectZExt(FI, true);
  } else if (VA.getLocInfo() == CCValAssign::SExt) {
    MFI.setObjectSExt(FI, true);
  }

  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val = DAG.getLoad(
      ValVT, dl, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
  return ExtendedInMem
             ? (VA.getValVT().isVector()
                    ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
                    : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
             : Val;
}
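
// Worked example (illustrative, not from the original source): when an i64
// is split into two i32 parts on a 32 bit target, the PartOffset == 0 part
// creates a single 8 byte fixed object covering the whole argument, and the
// PartOffset == 4 part is matched by the search above and loaded from offset
// 4 of that same object, so both halves read the caller's copy in place.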

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());

  if (Subtarget.isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return makeArrayRef(std::begin(GPR64ArgRegsWin64),
                        std::end(GPR64ArgRegsWin64));
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return makeArrayRef(std::begin(GPR64ArgRegs64Bit),
                      std::end(GPR64ArgRegs64Bit));
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  if (Subtarget.isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // in their paired GPR. So we only need to save the GPR to their home
    // slots.
    // TODO: __vectorcall will change this.
    return None;
  }

  const Function &F = MF.getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool isSoftFloat = Subtarget.useSoftFloat();
  assert(!(isSoftFloat && NoImplicitFloatOps) &&
         "SSE register cannot be used when SSE is disabled!");
  if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return None;

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}

static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
  return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
                        [](const CCValAssign &A, const CCValAssign &B) -> bool {
                          return A.getValNo() < B.getValNo();
                        });
}

SDValue X86TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();

  const Function &F = MF.getFunction();
  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
      F.getName() == "main")
    FuncInfo->setForceFramePointer(true);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);

  assert(
      !(isVarArg && canGuaranteeTCO(CallConv)) &&
      "Var args not supported with calling conventions regcall, fastcc, ghc or hipe");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeArguments(Ins, CC_X86);

  // In the vectorcall calling convention, a second pass is required for the
  // HVA registers.
  if (CallingConv::X86_VectorCall == CallConv) {
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
  }

  // The next loop assumes that the locations are in the same order as the
  // Ins arguments.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  SDValue ArgValue;
  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++InsIndex) {
    assert(InsIndex < Ins.size() && "Invalid Ins index");
    CCValAssign &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      if (VA.needsCustom()) {
        assert(
            VA.getValVT() == MVT::v64i1 &&
            "Currently the only custom case is when we split v64i1 to 2 regs");

        // v64i1 values, in regcall calling convention, that are
        // compiled to 32 bit arch, are split up into two registers.
        ArgValue =
            getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
      } else {
        const TargetRegisterClass *RC;
        if (RegVT == MVT::i8)
          RC = &X86::GR8RegClass;
        else if (RegVT == MVT::i16)
          RC = &X86::GR16RegClass;
        else if (RegVT == MVT::i32)
          RC = &X86::GR32RegClass;
        else if (Is64Bit && RegVT == MVT::i64)
          RC = &X86::GR64RegClass;
        else if (RegVT == MVT::f32)
          RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
        else if (RegVT == MVT::f64)
          RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
        else if (RegVT == MVT::f80)
          RC = &X86::RFP80RegClass;
        else if (RegVT == MVT::f128)
          RC = &X86::VR128RegClass;
        else if (RegVT.is512BitVector())
          RC = &X86::VR512RegClass;
        else if (RegVT.is256BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
        else if (RegVT.is128BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
        else if (RegVT == MVT::x86mmx)
          RC = &X86::VR64RegClass;
        else if (RegVT == MVT::v1i1)
          RC = &X86::VK1RegClass;
        else if (RegVT == MVT::v8i1)
          RC = &X86::VK8RegClass;
        else if (RegVT == MVT::v16i1)
          RC = &X86::VK16RegClass;
        else if (RegVT == MVT::v32i1)
          RC = &X86::VK32RegClass;
        else if (RegVT == MVT::v64i1)
          RC = &X86::VK64RegClass;
        else
          llvm_unreachable("Unknown argument type!");

        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::BCvt)
        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);

      if (VA.isExtInLoc()) {
        // Handle MMX values passed in XMM regs.
        if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
          ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
        else if (VA.getValVT().isVector() &&
                 VA.getValVT().getScalarType() == MVT::i1 &&
                 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
                  (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
          // Promoting a mask type (v*i1) into a register of type
          // i64/i32/i16/i8.
          ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
        } else
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      }
    } else {
      assert(VA.isMemLoc());
      ArgValue =
          LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
    }

    // If value is passed via pointer - do a load.
    if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
      ArgValue =
          DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());

    InVals.push_back(ArgValue);
  }

  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    // Swift calling convention does not require us to copy the sret argument
    // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
    if (CallConv == CallingConv::Swift)
      continue;

    // All x86 ABIs require that for returning structs by value we copy the
    // sret argument into %rax/%eax (depending on ABI) for the return. Save
    // the argument into a virtual register so that we can access it from the
    // return points.
    if (Ins[I].Flags.isSRet()) {
      unsigned Reg = FuncInfo->getSRetReturnReg();
      if (!Reg) {
        MVT PtrTy = getPointerTy(DAG.getDataLayout());
        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
        FuncInfo->setSRetReturnReg(Reg);
      }
      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
      break;
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (shouldGuaranteeTCO(CallConv,
                         MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start. We
  // can skip this if there are no va_start calls.
  if (MFI.hasVAStart() &&
      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
                   CallConv != CallingConv::X86_ThisCall))) {
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
  }

  // Figure out if XMM registers are in use.
  assert(!(Subtarget.useSoftFloat() &&
           F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
         "SSE register cannot be used when SSE is disabled!");

  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
  if (Is64Bit && isVarArg && MFI.hasVAStart()) {
    // Find the first unallocated argument registers.
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    // Gather all the live in physical registers.
    SmallVector<SDValue, 6> LiveGPRs;
    SmallVector<SDValue, 8> LiveXMMRegs;
    SDValue ALVal;
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(
          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
    }
    if (!ArgXMMs.empty()) {
      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
        LiveXMMRegs.push_back(
            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
      }
    }

    if (IsWin64) {
      // Get to the caller-allocated home save location. Add 8 to account
      // for the return address.
      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // Fixup to set vararg frame on shadow area (4 x i64).
      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
      FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
    }

    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                      getPointerTy(DAG.getDataLayout()));
    unsigned Offset = FuncInfo->getVarArgsGPOffset();
    for (SDValue Val : LiveGPRs) {
      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                                RSFIN, DAG.getIntPtrConstant(Offset, dl));
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(),
                           FuncInfo->getRegSaveFrameIndex(), Offset));
      MemOps.push_back(Store);
      Offset += 8;
    }

    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
      // Now store the XMM (fp + vector) parameter registers.
      SmallVector<SDValue, 12> SaveXMMOps;
      SaveXMMOps.push_back(Chain);
      SaveXMMOps.push_back(ALVal);
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
          FuncInfo->getRegSaveFrameIndex(), dl));
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
          FuncInfo->getVarArgsFPOffset(), dl));
      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
                        LiveXMMRegs.end());
      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
                                   MVT::Other, SaveXMMOps));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }
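
  // Worked example (illustrative, not from the original source): for a SysV
  // x86-64 vararg function that has consumed 2 integer and 1 XMM argument
  // registers, the register save area is 6*8 + 8*16 = 176 bytes, with
  // VarArgsGPOffset = 16 and VarArgsFPOffset = 48 + 16 = 64, matching the
  // gp_offset/fp_offset fields that va_arg reads from the va_list.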

  if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
    // Find the largest legal vector type.
    MVT VecVT = MVT::Other;
    // FIXME: Only some x86_32 calling conventions support AVX512.
    if (Subtarget.useAVX512Regs() &&
        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
                     CallConv == CallingConv::Intel_OCL_BI)))
      VecVT = MVT::v16f32;
    else if (Subtarget.hasAVX())
      VecVT = MVT::v8f32;
    else if (Subtarget.hasSSE2())
      VecVT = MVT::v4f32;

    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
    RegParmTypes.push_back(IntVT);
    if (VecVT != MVT::Other)
      RegParmTypes.push_back(VecVT);

    // Compute the set of forwarded registers. The rest are scratch.
    SmallVectorImpl<ForwardedRegister> &Forwards =
        FuncInfo->getForwardedMustTailRegParms();
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

    // Conservatively forward AL on x86_64, since it might be used for varargs.
    if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
    }

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &FR : Forwards) {
      // FIXME: Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
      FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
      Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
    }
  }

  // Some CCs need callee pop.
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
    // X86 interrupts must pop the error code (and the alignment padding) if
    // present.
    FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
  } else {
    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
        !Subtarget.getTargetTriple().isOSMSVCRT() &&
        argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
      FuncInfo->setBytesToPopOnReturn(4);
  }

  if (!Is64Bit) {
    // RegSaveFrameIndex is X86-64 only.
    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
    if (CallConv == CallingConv::X86_FastCall ||
        CallConv == CallingConv::X86_ThisCall)
      // fastcc functions can't have varargs.
      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
  }

  FuncInfo->setArgumentStackSize(StackSize);

  if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
    if (Personality == EHPersonality::CoreCLR) {
      assert(Is64Bit);
      // TODO: Add a mechanism to frame lowering that will allow us to indicate
      // that we'd prefer this slot be allocated towards the bottom of the frame
      // (i.e. near the stack pointer after allocating the frame). Every
      // funclet needs a copy of this slot in its (mostly empty) frame, and the
      // offset from the bottom of this and each funclet's frame must be the
      // same, so the size of funclets' (mostly empty) frames is dictated by
      // how far this slot is from the bottom (since they allocate just enough
      // space to accommodate holding this slot at the correct offset).
      int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
      EHInfo->PSPSymFrameIdx = PSPSymFI;
    }
  }

  if (CallConv == CallingConv::X86_RegCall ||
      F.hasFnAttribute("no_caller_saved_registers")) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
  }

  return Chain;
}

SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}

/// Emit a load of return address if tail call
/// optimization is performed and it is required.
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
    SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
  // Adjust the Return address stack slot.
  EVT VT = getPointerTy(DAG.getDataLayout());
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
  return SDValue(OutRetAddr.getNode(), 1);
}

/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff != 0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                          false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), NewReturnAddrFI));
  return Chain;
}

/// Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
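
// Worked example (illustrative, not from the original source): for VT =
// v4f32 the mask built above is <4, 1, 2, 3>, i.e. element 0 comes from V2
// and the remaining elements come from V1, which is exactly the MOVSS
// semantics of replacing only the low element.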

SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool &isTailCall = CLI.IsTailCall;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
  StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
  bool IsSibcall = false;
  bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
                        CallConv == CallingConv::Tail;
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
  const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
  const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
  bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
                 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
  const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
  bool HasNoCfCheck =
      (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
  const Module *M = MF.getMMI().getModule();
  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");

  MachineFunction::CallSiteInfo CSInfo;

  if (CallConv == CallingConv::X86_INTR)
    report_fatal_error("X86 interrupts may not be called directly");

  if (Attr.getValueAsString() == "true")
    isTailCall = false;

  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
    // If we are using a GOT, disable tail calls to external symbols with
    // default visibility. Tail calling such a symbol requires using a GOT
    // relocation, which forces early binding of the symbol. This breaks code
    // that requires lazy function symbol resolution. Using musttail or
    // GuaranteedTailCallOpt will override this.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
      isTailCall = false;
  }

  bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call. The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    isTailCall = true;
  } else if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, SR != NotStructReturn,
                    MF.getFunction().hasStructRetAttr(), CLI.RetTy,
                    Outs, OutVals, Ins, DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!IsGuaranteeTCO && isTailCall)
      IsSibcall = true;

    if (isTailCall)
      ++NumTailCalls;
  }

  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeArguments(Outs, CC_X86);

  // In the vectorcall calling convention, a second pass is required for the
  // HVA registers.
  if (CallingConv::X86_VectorCall == CallConv) {
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall)
    // This is a sibcall. The memory operands are available in the caller's
    // own caller's stack.
    NumBytes = 0;
  else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  int FPDiff = 0;
  if (isTailCall && !IsSibcall && !IsMustTail) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot.
    // But only set if delta is greater than previous delta.
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      X86Info->setTCReturnAddrDelta(FPDiff);
  }

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been
  // allocated for us and will be right at the top of the stack. We don't
  // support multiple arguments passed in memory when using inalloca.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  }

  if (!IsSibcall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, dl);

  SDValue RetAddrFrIdx;
  // Load return address for tail calls.
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;

  // The next loop assumes that the locations are in the same order as the
  // Outs arguments.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++OutIndex) {
    assert(OutIndex < Outs.size() && "Invalid Out index");
    // Skip inalloca arguments, they have already been written.
    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
    if (Flags.isInAlloca())
      continue;

    CCValAssign &VA = ArgLocs[I];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      if (Arg.getValueType().isVector() &&
          Arg.getValueType().getVectorElementType() == MVT::i1)
        Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
      else if (RegVT.is128BitVector()) {
        // Special case: passing MMX values in XMM registers.
        Arg = DAG.getBitcast(MVT::i64, Arg);
        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
      } else
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getBitcast(RegVT, Arg);
      break;
    case CCValAssign::Indirect: {
      if (isByVal) {
        // Memcpy the argument to a temporary stack slot to prevent
        // the caller from seeing any modifications the callee may make
        // as guaranteed by the `byval` attribute.
        int FrameIdx = MF.getFrameInfo().CreateStackObject(
            Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
            false);
        SDValue StackSlot =
            DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
        Chain =
            CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
        // From now on treat this as a regular pointer.
        Arg = StackSlot;
        isByVal = false;
      } else {
        // Store the argument.
        SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
        int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
        Chain = DAG.getStore(
            Chain, dl, Arg, SpillSlot,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
        Arg = SpillSlot;
      }
      break;
    }
    }

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      // Split v64i1 value into two registers.
      Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      const TargetOptions &Options = DAG.getTarget().Options;
      if (Options.EnableDebugEntryValues)
        CSInfo.emplace_back(VA.getLocReg(), I);
      if (isVarArg && IsWin64) {
        // The Win64 ABI requires an argument XMM reg to be copied to the
        // corresponding shadow reg if the callee is a varargs function.
        unsigned ShadowReg = 0;
        switch (VA.getLocReg()) {
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8; break;
        case X86::XMM3: ShadowReg = X86::R9; break;
        }
        if (ShadowReg)
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
      }
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      assert(VA.isMemLoc());
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                      getPointerTy(DAG.getDataLayout()));
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  if (Subtarget.isPICStyleGOT()) {
    // ELF / PIC requires GOT in the EBX register before function calls via PLT
    // GOT pointer.
    if (!isTailCall) {
      RegsToPass.push_back(std::make_pair(
          unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                                          getPointerTy(DAG.getDataLayout()))));
    } else {
      // If we are tail calling and generating PIC/GOT style code, load the
      // address of the callee into ECX. The value in ECX is used as the target
      // of the tail jump. This is done to circumvent the ebx/callee-saved
      // problem for tail calls on PIC/GOT architectures. Normally we would
      // just put the address of GOT into ebx and then call target@PLT. But for
      // tail calls ebx would be restored (since ebx is callee saved) before
      // jumping to the target@PLT.

      // Note: The actual moving to ECX is done further down.
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G && !G->getGlobal()->hasLocalLinkage() &&
          G->getGlobal()->hasDefaultVisibility())
        Callee = LowerGlobalAddress(Callee, DAG);
      else if (isa<ExternalSymbolSDNode>(Callee))
        Callee = LowerExternalSymbol(Callee, DAG);
    }
  }

  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as a hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget.hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
    RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
                                        DAG.getConstant(NumXMMRegs, dl,
                                                        MVT::i8)));
  }

  if (isVarArg && IsMustTail) {
    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
    }
  }

  // For tail calls, lower the arguments to the 'real' stack slots. Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = INT_MAX;
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
         ++I, ++OutsIndex) {
      CCValAssign &VA = ArgLocs[I];

      if (VA.isRegLoc()) {
        if (VA.needsCustom()) {
          assert((CallConv == CallingConv::X86_RegCall) &&
                 "Expecting custom case only in regcall calling convention");
          // This means that we are in a special case where one argument was
          // passed through two register locations - skip the next location.
          ++I;
        }
        continue;
      }

      assert(VA.isMemLoc());
      SDValue Arg = OutVals[OutsIndex];
      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
      // Skip inalloca arguments. They don't require any work.
      if (Flags.isInAlloca())
        continue;
      // Create frame index.
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));

      if (Flags.isByVal()) {
        // Copy relative to framepointer.
        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                        getPointerTy(DAG.getDataLayout()));
        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                             StackPtr, Source);

        MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
                                                         ArgChain,
                                                         Flags, DAG, dl));
      } else {
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(
            ArgChain, dl, Arg, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     getPointerTy(DAG.getDataLayout()),
                                     RegInfo->getSlotSize(), FPDiff, dl);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, we have to make all calls
    // through a register, since the call instruction's 32-bit
    // pc-relative offset may not be large enough to hold the whole
    // address.
  } else if (Callee->getOpcode() == ISD::GlobalAddress ||
             Callee->getOpcode() == ISD::ExternalSymbol) {
    // Lower direct calls to global addresses and external symbols. Setting
    // ForCall to true here has the effect of removing WrapperRIP when possible
    // to allow direct calls to be selected without first materializing the
    // address into a register.
    Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
  } else if (Subtarget.isTarget64BitILP32() &&
             Callee->getValueType(0) == MVT::i32) {
    // Zero-extend the 32-bit Callee address into a 64-bit one according to the
    // x32 ABI.
    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;

  if (!IsSibcall && isTailCall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
                               DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (isTailCall)
    Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists), then we
  // use the X86_INTR calling convention because it has the same CSR mask
  // (same preserved registers).
  const uint32_t *Mask = RegInfo->getCallPreservedMask(
      MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  // If this is an invoke in a 32-bit function using a funclet-based
  // personality, assume the function clobbers all registers. If an exception
  // is thrown, the runtime will not restore CSRs.
  // FIXME: Model this more precisely so that we can register allocate across
  // the normal edge and spill and fill across the exceptional edge.
  if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
    const Function &CallerFn = MF.getFunction();
    EHPersonality Pers =
        CallerFn.hasPersonalityFn()
            ? classifyEHPersonality(CallerFn.getPersonalityFn())
            : EHPersonality::Unknown;
    if (isFuncletEHPersonality(Pers))
      Mask = RegInfo->getNoPreservedMask();
  }

  // Define a new register mask from the existing mask.
  uint32_t *RegMask = nullptr;

  // In some calling conventions we need to remove the used physical registers
  // from the reg mask.
  if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

    // Allocate a new Reg Mask and copy Mask.
    RegMask = MF.allocateRegMask();
    unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);

    // Make sure all sub registers of the argument registers are reset
    // in the RegMask.
    for (auto const &RegPair : RegsToPass)
      for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
           SubRegs.isValid(); ++SubRegs)
        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));

    // Create the RegMask operand according to our updated mask.
    Ops.push_back(DAG.getRegisterMask(RegMask));
  } else {
    // Create the RegMask operand according to the static mask.
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  if (isTailCall) {
    //// If this is the first return lowered for this function, add the regs
    //// to the liveout set for the function.
    // This isn't right, although it's probably harmless on x86; liveouts
    // should be computed from returns, not tail calls. Consider a void
    // function making a tail call to a function returning int.
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  if (HasNoCfCheck && IsCFProtectionSupported) {
    Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
  } else {
    Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
  }
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  // Save heapallocsite metadata.
  if (CLI.CS)
    if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
      DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop;
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       DAG.getTarget().Options.GuaranteedTailCallOpt))
    NumBytesForCalleeToPop = NumBytes; // Callee pops everything.
  else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
           !Subtarget.getTargetTriple().isOSMSVCRT() &&
           SR == StackStructReturn)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
    NumBytesForCalleeToPop = 4;
  else
    NumBytesForCalleeToPop = 0; // Callee pops nothing.

  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
    // No need to reset the stack after the call if the call doesn't return. To
    // make the MI verify, we'll pretend the callee does it for us.
    NumBytesForCalleeToPop = NumBytes;
  }

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
                               DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
                                                     true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, RegMask);
}

//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like StdCall, the callee cleans up the arguments, except that ECX is
//  reserved for storing the tail called function address. Only 2 registers are
//  free for argument passing (inreg). Tail call optimization is performed
//  provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  On X86_64 architecture with GOT-style position independent code only local
//  (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI, the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - Darwin's
//  dyld, for example.)
//  If a tail called function callee has more arguments than the caller, the
//  caller needs to make sure that there is room to move the RETADDR to. This
//  is achieved by reserving an area the size of the argument delta right after
//  the original RETADDR, but before the saved framepointer or the spilled
//  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// Round the stack size up so that it satisfies e.g. a 16-byte alignment
/// requirement while leaving room for the return address slot
/// (16n + 12 for 4-byte slots).
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                               SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  unsigned SlotSize = RegInfo->getSlotSize();
  if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
    // The misalignment is small enough; just add the difference.
    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out the lower bits, then add the stack alignment once plus the
    // (StackAlignment - SlotSize) bytes.
    Offset = ((~AlignMask) & Offset) + StackAlignment +
             (StackAlignment - SlotSize);
  }
  return Offset;
}
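
// Worked example (illustrative, not from the original source): with a 16 byte
// stack alignment and 8 byte slots (x86-64), the target shape is 16n + 8.
// StackSize = 20 has (20 & 15) = 4 <= 8, so it is padded up to 24; StackSize
// = 28 has (28 & 15) = 12 > 8, so it is rounded to 16 + 16 + 8 = 40.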

/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const X86InstrInfo *TII, const CCValAssign &VA) {
4211 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4214 // Look through nodes that don't alter the bits of the incoming value.
4215 unsigned Op = Arg.getOpcode();
4216 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4217 Arg = Arg.getOperand(0);
4220 if (Op == ISD::TRUNCATE) {
4221 const SDValue &TruncInput = Arg.getOperand(0);
4222 if (TruncInput.getOpcode() == ISD::AssertZext &&
4223 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4224 Arg.getValueType()) {
4225 Arg = TruncInput.getOperand(0);
4233 if (Arg.getOpcode() == ISD::CopyFromReg) {
4234 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4235 if (!Register::isVirtualRegister(VR))
4237 MachineInstr *Def = MRI->getVRegDef(VR);
4240 if (!Flags.isByVal()) {
4241 if (!TII->isLoadFromStackSlot(*Def, FI))
4244 unsigned Opcode = Def->getOpcode();
4245 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4246 Opcode == X86::LEA64_32r) &&
4247 Def->getOperand(1).isFI()) {
4248 FI = Def->getOperand(1).getIndex();
4249 Bytes = Flags.getByValSize();
4253 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4254 if (Flags.isByVal())
4255 // ByVal argument is passed in as a pointer but it's now being
4256 // dereferenced. e.g.
4257 // define @foo(%struct.X* %A) {
4258 // tail call @bar(%struct.X* byval %A)
4261 SDValue Ptr = Ld->getBasePtr();
4262 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4265 FI = FINode->getIndex();
4266 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4267 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4268 FI = FINode->getIndex();
4269 Bytes = Flags.getByValSize();
4273 assert(FI != INT_MAX);
4274 if (!MFI.isFixedObjectIndex(FI))
4277 if (Offset != MFI.getObjectOffset(FI))
4280 // If this is not byval, check that the argument stack object is immutable.
4281 // inalloca and argument copy elision can create mutable argument stack
4282 // objects. Byval objects can be mutated, but a byval call intends to pass the
4283 // mutated memory.
4284 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4287 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4288 // If the argument location is wider than the argument type, check that any
4289 // extension flags match.
4290 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4291 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4296 return Bytes == MFI.getObjectSize(FI);
4299 /// Check whether the call is eligible for tail call optimization. Targets
4300 /// that want to do tail call optimization should implement this function.
4301 bool X86TargetLowering::IsEligibleForTailCallOptimization(
4302 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4303 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4304 const SmallVectorImpl<ISD::OutputArg> &Outs,
4305 const SmallVectorImpl<SDValue> &OutVals,
4306 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4307 if (!mayTailCallThisCC(CalleeCC))
4310 // If -tailcallopt is specified, make fastcc functions tail-callable.
4311 MachineFunction &MF = DAG.getMachineFunction();
4312 const Function &CallerF = MF.getFunction();
4314 // If the function return type is x86_fp80 and the callee return type is not,
4315 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4316 // perform a tailcall optimization here.
4317 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4320 CallingConv::ID CallerCC = CallerF.getCallingConv();
4321 bool CCMatch = CallerCC == CalleeCC;
4322 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4323 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4324 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4325 CalleeCC == CallingConv::Tail;
4327 // Win64 functions have extra shadow space for argument homing. Don't do the
4328 // sibcall if the caller and callee have mismatched expectations for this
4329 // space.
4330 if (IsCalleeWin64 != IsCallerWin64)
4333 if (IsGuaranteeTCO) {
4334 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4339 // Look for obvious safe cases to perform tail call optimization that do not
4340 // require ABI changes. This is what gcc calls sibcall.
4342 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4343 // emit a special epilogue.
4344 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4345 if (RegInfo->needsStackRealignment(MF))
4348 // Also avoid sibcall optimization if either caller or callee uses struct
4349 // return semantics.
4350 if (isCalleeStructRet || isCallerStructRet)
4353 // Do not sibcall optimize vararg calls unless all arguments are passed via
4354 // registers.
4355 LLVMContext &C = *DAG.getContext();
4356 if (isVarArg && !Outs.empty()) {
4357 // Optimizing for varargs on Win64 is unlikely to be safe without
4358 // additional testing.
4359 if (IsCalleeWin64 || IsCallerWin64)
4362 SmallVector<CCValAssign, 16> ArgLocs;
4363 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4365 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4366 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4367 if (!ArgLocs[i].isRegLoc())
4371 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4372 // stack. Therefore, if it's not used by the call it is not safe to optimize
4373 // this into a sibcall.
4374 bool Unused = false;
4375 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4382 SmallVector<CCValAssign, 16> RVLocs;
4383 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4384 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4385 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4386 CCValAssign &VA = RVLocs[i];
4387 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4392 // Check that the call results are passed in the same way.
4393 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4394 RetCC_X86, RetCC_X86))
4396 // The callee has to preserve all registers the caller needs to preserve.
4397 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4398 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4400 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4401 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4405 unsigned StackArgsSize = 0;
4407 // If the callee takes no arguments then go on to check the results of the
4408 // call.
4409 if (!Outs.empty()) {
4410 // Check if stack adjustment is needed. For now, do not do this if any
4411 // argument is passed on the stack.
4412 SmallVector<CCValAssign, 16> ArgLocs;
4413 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4415 // Allocate shadow area for Win64
4417 CCInfo.AllocateStack(32, 8);
4419 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4420 StackArgsSize = CCInfo.getNextStackOffset();
4422 if (CCInfo.getNextStackOffset()) {
4423 // Check if the arguments are already laid out in the right way as
4424 // the caller's fixed stack objects.
4425 MachineFrameInfo &MFI = MF.getFrameInfo();
4426 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4427 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4428 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4429 CCValAssign &VA = ArgLocs[i];
4430 SDValue Arg = OutVals[i];
4431 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4432 if (VA.getLocInfo() == CCValAssign::Indirect)
4434 if (!VA.isRegLoc()) {
4435 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4442 bool PositionIndependent = isPositionIndependent();
4443 // If the tailcall address may be in a register, then make sure it's
4444 // possible to register allocate for it. In 32-bit, the call address can
4445 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4446 // callee-saved registers are restored. These happen to be the same
4447 // registers used to pass 'inreg' arguments so watch out for those.
4448 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4449 !isa<ExternalSymbolSDNode>(Callee)) ||
4450 PositionIndependent)) {
4451 unsigned NumInRegs = 0;
4452 // In PIC we need an extra register to formulate the address computation
4453 // for the global base register.
4454 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4456 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4457 CCValAssign &VA = ArgLocs[i];
4460 Register Reg = VA.getLocReg();
4463 case X86::EAX: case X86::EDX: case X86::ECX:
4464 if (++NumInRegs == MaxInRegs)
4471 const MachineRegisterInfo &MRI = MF.getRegInfo();
4472 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4476 bool CalleeWillPop =
4477 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4478 MF.getTarget().Options.GuaranteedTailCallOpt);
4480 if (unsigned BytesToPop =
4481 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4482 // If we have bytes to pop, the callee must pop them.
4483 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4484 if (!CalleePopMatches)
4486 } else if (CalleeWillPop && StackArgsSize > 0) {
4487 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4495 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4496 const TargetLibraryInfo *libInfo) const {
4497 return X86::createFastISel(funcInfo, libInfo);
4500 //===----------------------------------------------------------------------===//
4501 // Other Lowering Hooks
4502 //===----------------------------------------------------------------------===//
4504 static bool MayFoldLoad(SDValue Op) {
4505 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4508 static bool MayFoldIntoStore(SDValue Op) {
4509 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4512 static bool MayFoldIntoZeroExtend(SDValue Op) {
4513 if (Op.hasOneUse()) {
4514 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4515 return (ISD::ZERO_EXTEND == Opcode);
4520 static bool isTargetShuffle(unsigned Opcode) {
4522 default: return false;
4523 case X86ISD::BLENDI:
4524 case X86ISD::PSHUFB:
4525 case X86ISD::PSHUFD:
4526 case X86ISD::PSHUFHW:
4527 case X86ISD::PSHUFLW:
4529 case X86ISD::INSERTPS:
4530 case X86ISD::EXTRQI:
4531 case X86ISD::INSERTQI:
4532 case X86ISD::PALIGNR:
4533 case X86ISD::VSHLDQ:
4534 case X86ISD::VSRLDQ:
4535 case X86ISD::MOVLHPS:
4536 case X86ISD::MOVHLPS:
4537 case X86ISD::MOVSHDUP:
4538 case X86ISD::MOVSLDUP:
4539 case X86ISD::MOVDDUP:
4542 case X86ISD::UNPCKL:
4543 case X86ISD::UNPCKH:
4544 case X86ISD::VBROADCAST:
4545 case X86ISD::VPERMILPI:
4546 case X86ISD::VPERMILPV:
4547 case X86ISD::VPERM2X128:
4548 case X86ISD::SHUF128:
4549 case X86ISD::VPERMIL2:
4550 case X86ISD::VPERMI:
4551 case X86ISD::VPPERM:
4552 case X86ISD::VPERMV:
4553 case X86ISD::VPERMV3:
4554 case X86ISD::VZEXT_MOVL:
4559 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4561 default: return false;
4563 case X86ISD::PSHUFB:
4564 case X86ISD::VPERMILPV:
4565 case X86ISD::VPERMIL2:
4566 case X86ISD::VPPERM:
4567 case X86ISD::VPERMV:
4568 case X86ISD::VPERMV3:
4570 // 'Faux' Target Shuffles.
4578 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4579 MachineFunction &MF = DAG.getMachineFunction();
4580 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4581 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4582 int ReturnAddrIndex = FuncInfo->getRAIndex();
4584 if (ReturnAddrIndex == 0) {
4585 // Set up a frame object for the return address.
4586 unsigned SlotSize = RegInfo->getSlotSize();
4587 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4590 FuncInfo->setRAIndex(ReturnAddrIndex);
4593 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4596 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4597 bool hasSymbolicDisplacement) {
4598 // The offset should fit into the 32-bit immediate field.
4599 if (!isInt<32>(Offset))
4602 // If we don't have a symbolic displacement, we don't have any extra
4603 // restrictions.
4604 if (!hasSymbolicDisplacement)
4607 // FIXME: Some tweaks might be needed for medium code model.
4608 if (M != CodeModel::Small && M != CodeModel::Kernel)
4611 // For the small code model we assume that the latest object is 16MB below
4612 // the end of the 31-bit boundary. We may also accept pretty large negative
4613 // constants knowing that all objects are in the positive half of the address space.
4614 if (M == CodeModel::Small && Offset < 16*1024*1024)
4617 // For the kernel code model we know that all objects reside in the negative
4618 // half of the 32-bit address space. We must not accept negative offsets, since
4619 // they may wrap out of range, but we may accept pretty large positive ones.
4620 if (M == CodeModel::Kernel && Offset >= 0)
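4621 // For illustration: with a symbolic displacement, the small code model
4622 // accepts Offset = 15 << 20 (15MB) but rejects 16 << 20, while the kernel
4623 // code model accepts Offset = 4096 and rejects Offset = -8.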
4626 /// Determines whether the callee is required to pop its own arguments.
4627 /// Callee pop is necessary to support tail calls.
4628 bool X86::isCalleePop(CallingConv::ID CallingConv,
4629 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4630 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4631 // can guarantee TCO.
4632 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4635 switch (CallingConv) {
4638 case CallingConv::X86_StdCall:
4639 case CallingConv::X86_FastCall:
4640 case CallingConv::X86_ThisCall:
4641 case CallingConv::X86_VectorCall:
4646 /// Return true if the condition is an unsigned comparison operation.
4647 static bool isX86CCUnsigned(unsigned X86CC) {
4650 llvm_unreachable("Invalid integer condition!");
4666 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4667 switch (SetCCOpcode) {
4668 default: llvm_unreachable("Invalid integer condition!");
4669 case ISD::SETEQ: return X86::COND_E;
4670 case ISD::SETGT: return X86::COND_G;
4671 case ISD::SETGE: return X86::COND_GE;
4672 case ISD::SETLT: return X86::COND_L;
4673 case ISD::SETLE: return X86::COND_LE;
4674 case ISD::SETNE: return X86::COND_NE;
4675 case ISD::SETULT: return X86::COND_B;
4676 case ISD::SETUGT: return X86::COND_A;
4677 case ISD::SETULE: return X86::COND_BE;
4678 case ISD::SETUGE: return X86::COND_AE;
4682 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4683 /// condition code, returning the condition code and the LHS/RHS of the
4684 /// comparison to make.
4685 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4686 bool isFP, SDValue &LHS, SDValue &RHS,
4687 SelectionDAG &DAG) {
4689 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4690 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4691 // X > -1 -> X == 0, jump !sign.
4692 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4693 return X86::COND_NS;
4695 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4696 // X < 0 -> X == 0, jump on sign.
4699 if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4700 // X >= 0 -> X == 0, jump on !sign.
4701 return X86::COND_NS;
4703 if (SetCCOpcode == ISD::SETLT && RHSC->getAPIntValue() == 1) {
4704 // X < 1 -> X <= 0
4705 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4706 return X86::COND_LE;
4710 return TranslateIntegerX86CC(SetCCOpcode);
4713 // First determine if it is required or is profitable to flip the operands.
4715 // If LHS is a foldable load, but RHS is not, flip the condition.
4716 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4717 !ISD::isNON_EXTLoad(RHS.getNode())) {
4718 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4719 std::swap(LHS, RHS);
4722 switch (SetCCOpcode) {
4728 std::swap(LHS, RHS);
4732 // On a floating point condition, the flags are set as follows:
4733 //  ZF  PF  CF   op
4734 //   0 | 0 | 0 | X > Y
4735 //   0 | 0 | 1 | X < Y
4736 //   1 | 0 | 0 | X == Y
4737 //   1 | 1 | 1 | unordered
4738 switch (SetCCOpcode) {
4739 default: llvm_unreachable("Condcode should be pre-legalized away");
4741 case ISD::SETEQ: return X86::COND_E;
4742 case ISD::SETOLT: // flipped
4744 case ISD::SETGT: return X86::COND_A;
4745 case ISD::SETOLE: // flipped
4747 case ISD::SETGE: return X86::COND_AE;
4748 case ISD::SETUGT: // flipped
4750 case ISD::SETLT: return X86::COND_B;
4751 case ISD::SETUGE: // flipped
4753 case ISD::SETLE: return X86::COND_BE;
4755 case ISD::SETNE: return X86::COND_NE;
4756 case ISD::SETUO: return X86::COND_P;
4757 case ISD::SETO: return X86::COND_NP;
4759 case ISD::SETUNE: return X86::COND_INVALID;
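4760 // For example, (setolt X, Y) is swapped above into the (setogt Y, X) form
4761 // and then mapped to COND_A, matching how comiss/ucomiss set ZF and CF as
4762 // if for an unsigned comparison.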
4763 /// Is there a floating point cmov for the specific X86 condition code?
4764 /// Current x86 ISA includes the following FP cmov instructions:
4765 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4766 static bool hasFPCMov(unsigned X86CC) {
4783 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4785 MachineFunction &MF,
4786 unsigned Intrinsic) const {
4788 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4792 Info.flags = MachineMemOperand::MONone;
4795 switch (IntrData->Type) {
4796 case TRUNCATE_TO_MEM_VI8:
4797 case TRUNCATE_TO_MEM_VI16:
4798 case TRUNCATE_TO_MEM_VI32: {
4799 Info.opc = ISD::INTRINSIC_VOID;
4800 Info.ptrVal = I.getArgOperand(0);
4801 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4802 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4803 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4805 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4806 ScalarVT = MVT::i16;
4807 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4808 ScalarVT = MVT::i32;
4810 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4811 Info.align = Align::None();
4812 Info.flags |= MachineMemOperand::MOStore;
4817 Info.opc = ISD::INTRINSIC_W_CHAIN;
4818 Info.ptrVal = nullptr;
4819 MVT DataVT = MVT::getVT(I.getType());
4820 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4821 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4822 IndexVT.getVectorNumElements());
4823 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4824 Info.align = Align::None();
4825 Info.flags |= MachineMemOperand::MOLoad;
4829 Info.opc = ISD::INTRINSIC_VOID;
4830 Info.ptrVal = nullptr;
4831 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4832 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4833 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4834 IndexVT.getVectorNumElements());
4835 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4836 Info.align = Align::None();
4837 Info.flags |= MachineMemOperand::MOStore;
4847 /// Returns true if the target can instruction select the
4848 /// specified FP immediate natively. If false, the legalizer will
4849 /// materialize the FP immediate as a load from a constant pool.
4850 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4851 bool ForCodeSize) const {
4852 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
4853 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
4859 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
4860 ISD::LoadExtType ExtTy,
4862 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
4864 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
4865 // relocation target a movq or addq instruction: don't let the load shrink.
4866 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
4867 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
4868 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
4869 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
4871 // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
4872 // those uses are extracted directly into a store, then the extract + store
4873 // can be store-folded. Therefore, it's probably not worth splitting the load.
4874 EVT VT = Load->getValueType(0);
4875 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
4876 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
4877 // Skip uses of the chain value. Result 0 of the node is the load value.
4878 if (UI.getUse().getResNo() != 0)
4881 // If this use is not an extract + store, it's probably worth splitting.
4882 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
4883 UI->use_begin()->getOpcode() != ISD::STORE)
4886 // All non-chain uses are extract + store.
4893 /// Returns true if it is beneficial to convert a load of a constant
4894 /// to just the constant itself.
4895 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
4897 assert(Ty->isIntegerTy());
4899 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4900 if (BitSize == 0 || BitSize > 64)
4905 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
4906 // If we are using XMM registers in the ABI and the condition of the select is
4907 // a floating-point compare and we have blendv or conditional move, then it is
4908 // cheaper to select instead of doing a cross-register move and creating a
4909 // load that depends on the compare result.
4910 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
4911 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
4914 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
4915 // TODO: It might be a win to ease or lift this restriction, but the generic
4916 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
4917 if (VT.isVector() && Subtarget.hasAVX512())
4923 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
4925 // TODO: We handle scalars using custom code, but generic combining could make
4926 // that unnecessary.
4928 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
4931 // Find the type this will be legalized to. Otherwise we might prematurely
4932 // convert this to shl+add/sub and then still have to type legalize those ops.
4933 // Another choice would be to defer the decision for illegal types until
4934 // after type legalization. But constant splat vectors of i64 can't make it
4935 // through type legalization on 32-bit targets so we would need to
4936 // special-case each opcode.
4937 while (getTypeAction(Context, VT) != TypeLegal)
4938 VT = getTypeToTransformTo(Context, VT);
4940 // If vector multiply is legal, assume that's faster than shl + add/sub.
4941 // TODO: Multiply is a complex op with higher latency and lower throughput in
4942 // most implementations, so this check could be loosened based on type
4943 // and/or a CPU attribute.
4944 if (isOperationLegal(ISD::MUL, VT))
4947 // shl+add, shl+sub, shl+add+neg
4948 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
4949 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
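4950 // e.g. MulC = 9 -> (X << 3) + X, MulC = 7 -> (X << 3) - X, and
4951 // MulC = -3 -> X - (X << 2); the actual expansion is emitted elsewhere.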
4952 bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
4953 bool IsSigned) const {
4954 // f80 UINT_TO_FP is more efficient using Strict code if FCMOV is available.
4955 return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
4958 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
4959 unsigned Index) const {
4960 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
4963 // Mask vectors support all subregister combinations and operations that
4964 // extract half of vector.
4965 if (ResVT.getVectorElementType() == MVT::i1)
4966 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
4967 (Index == ResVT.getVectorNumElements()));
4969 return (Index % ResVT.getVectorNumElements()) == 0;
4972 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
4973 unsigned Opc = VecOp.getOpcode();
4975 // Assume target opcodes can't be scalarized.
4976 // TODO - do we have any exceptions?
4977 if (Opc >= ISD::BUILTIN_OP_END)
4980 // If the vector op is not supported, try to convert to scalar.
4981 EVT VecVT = VecOp.getValueType();
4982 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
4985 // If the vector op is supported, but the scalar op is not, the transform may
4986 // not be worthwhile.
4987 EVT ScalarVT = VecVT.getScalarType();
4988 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
4991 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
4992 // TODO: Allow vectors?
4995 return VT.isSimple() || !isOperationExpand(Opcode, VT);
4998 bool X86TargetLowering::isCheapToSpeculateCttz() const {
4999 // Speculate cttz only if we can directly use TZCNT.
5000 return Subtarget.hasBMI();
5003 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5004 // Speculate ctlz only if we can directly use LZCNT.
5005 return Subtarget.hasLZCNT();
5008 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5009 const SelectionDAG &DAG,
5010 const MachineMemOperand &MMO) const {
5011 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5012 BitcastVT.getVectorElementType() == MVT::i1)
5015 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5018 // If both types are legal vectors, it's always ok to convert them.
5019 if (LoadVT.isVector() && BitcastVT.isVector() &&
5020 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5023 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5026 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5027 const SelectionDAG &DAG) const {
5028 // Do not merge to float value size (128 bits) if no implicit
5029 // float attribute is set.
5030 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5031 Attribute::NoImplicitFloat);
5034 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5035 return (MemVT.getSizeInBits() <= MaxIntSize);
5037 // Make sure we don't merge stores greater than our preferred vector
5038 // width.
5039 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5044 bool X86TargetLowering::isCtlzFast() const {
5045 return Subtarget.hasFastLZCNT();
5048 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5049 const Instruction &AndI) const {
5053 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5054 EVT VT = Y.getValueType();
5059 if (!Subtarget.hasBMI())
5062 // There are only 32-bit and 64-bit forms for 'andn'.
5063 if (VT != MVT::i32 && VT != MVT::i64)
5066 return !isa<ConstantSDNode>(Y);
5069 bool X86TargetLowering::hasAndNot(SDValue Y) const {
5070 EVT VT = Y.getValueType();
5073 return hasAndNotCompare(Y);
5077 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5080 if (VT == MVT::v4i32)
5083 return Subtarget.hasSSE2();
5086 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5087 return X.getValueType().isScalarInteger(); // 'bt'
5090 bool X86TargetLowering::
5091 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5092 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5093 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5094 SelectionDAG &DAG) const {
5095 // Does the baseline recommend not performing the fold by default?
5096 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5097 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5099 // For scalars this transform is always beneficial.
5100 if (X.getValueType().isScalarInteger())
5102 // If all the shift amounts are identical, then transform is beneficial even
5103 // with rudimentary SSE2 shifts.
5104 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5106 // If we have AVX2 with its powerful shift operations, then it's also good.
5107 if (Subtarget.hasAVX2())
5109 // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5110 return NewShiftOpcode == ISD::SHL;
5113 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5114 const SDNode *N, CombineLevel Level) const {
5115 assert(((N->getOpcode() == ISD::SHL &&
5116 N->getOperand(0).getOpcode() == ISD::SRL) ||
5117 (N->getOpcode() == ISD::SRL &&
5118 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5119 "Expected shift-shift mask");
5120 EVT VT = N->getValueType(0);
5121 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5122 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5123 // Only fold if the shift values are equal - so it folds to AND.
5124 // TODO - we should fold if either is a non-uniform vector but we don't do
5125 // the fold for non-splats yet.
5126 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5128 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
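5129 // e.g. for equal amounts C: (srl (shl X, C), C) -> (and X, -1u >> C) and
5130 // (shl (srl X, C), C) -> (and X, -1 << C).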
5131 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5132 EVT VT = Y.getValueType();
5134 // For vectors, we don't have a preference, but we probably want a mask.
5138 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5139 if (VT == MVT::i64 && !Subtarget.is64Bit())
5145 bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5147 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5148 !Subtarget.isOSWindows())
5153 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5154 // Any legal vector type can be splatted more efficiently than
5155 // loading/spilling from memory.
5156 return isTypeLegal(VT);
5159 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5160 MVT VT = MVT::getIntegerVT(NumBits);
5161 if (isTypeLegal(VT))
5164 // PMOVMSKB can handle this.
5165 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5168 // VPMOVMSKB can handle this.
5169 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5172 // TODO: Allow 64-bit type for 32-bit target.
5173 // TODO: 512-bit types should be allowed, but make sure that those
5174 // cases are handled in combineVectorSizedSetCCEquality().
5176 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5179 /// Val is the undef sentinel value or equal to the specified value.
5180 static bool isUndefOrEqual(int Val, int CmpVal) {
5181 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5184 /// Val is either the undef or zero sentinel value.
5185 static bool isUndefOrZero(int Val) {
5186 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5189 /// Return true if every element in Mask, beginning from position Pos and ending
5190 /// in Pos+Size is the undef sentinel value.
5191 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5192 return llvm::all_of(Mask.slice(Pos, Size),
5193 [](int M) { return M == SM_SentinelUndef; });
5196 /// Return true if the mask creates a vector whose lower half is undefined.
5197 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5198 unsigned NumElts = Mask.size();
5199 return isUndefInRange(Mask, 0, NumElts / 2);
5202 /// Return true if the mask creates a vector whose upper half is undefined.
5203 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5204 unsigned NumElts = Mask.size();
5205 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5208 /// Return true if Val falls within the specified range [Low, Hi).
5209 static bool isInRange(int Val, int Low, int Hi) {
5210 return (Val >= Low && Val < Hi);
5213 /// Return true if the value of any element in Mask falls within the specified
5215 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5216 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5219 /// Return true if Val is undef or if its value falls within the
5220 /// specified range [Low, Hi).
5221 static bool isUndefOrInRange(int Val, int Low, int Hi) {
5222 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5225 /// Return true if every element in Mask is undef or if its value
5226 /// falls within the specified range [Low, Hi).
5227 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5228 return llvm::all_of(
5229 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5232 /// Return true if Val is undef, zero or if its value falls within the
5233 /// specified range [Low, Hi).
5234 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5235 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5238 /// Return true if every element in Mask is undef, zero or if its value
5239 /// falls within the specified range [Low, Hi).
5240 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5241 return llvm::all_of(
5242 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5245 /// Return true if every element in Mask, beginning
5246 /// from position Pos and ending in Pos + Size, falls within the specified
5247 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5248 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5249 unsigned Size, int Low, int Step = 1) {
5250 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5251 if (!isUndefOrEqual(Mask[i], Low))
5256 /// Return true if every element in Mask, beginning
5257 /// from position Pos and ending in Pos+Size, falls within the specified
5258 /// sequential range [Low, Low+Size), or is undef or is zero.
5259 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5260 unsigned Size, int Low,
5262 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5263 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5268 /// Return true if every element in Mask, beginning
5269 /// from position Pos and ending in Pos+Size is undef or is zero.
5270 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5272 return llvm::all_of(Mask.slice(Pos, Size),
5273 [](int M) { return isUndefOrZero(M); });
5276 /// Helper function to test whether a shuffle mask could be
5277 /// simplified by widening the elements being shuffled.
5279 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5280 /// leaves it in an unspecified state.
5282 /// NOTE: This must handle normal vector shuffle masks and *target* vector
5283 /// shuffle masks. The latter have the special property of a '-2' representing
5284 /// a zeroed lane of a vector.
5285 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5286 SmallVectorImpl<int> &WidenedMask) {
5287 WidenedMask.assign(Mask.size() / 2, 0);
5288 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5290 int M1 = Mask[i + 1];
5292 // If both elements are undef, its trivial.
5293 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5294 WidenedMask[i / 2] = SM_SentinelUndef;
5298 // Check for an undef mask and a mask value properly aligned to fit with
5299 // a pair of values. If we find such a case, use the non-undef mask's value.
5300 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5301 WidenedMask[i / 2] = M1 / 2;
5304 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5305 WidenedMask[i / 2] = M0 / 2;
5309 // When zeroing, we need to spread the zeroing across both lanes to widen.
5310 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5311 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5312 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5313 WidenedMask[i / 2] = SM_SentinelZero;
5319 // Finally check if the two mask values are adjacent and aligned with
5321 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5322 WidenedMask[i / 2] = M0 / 2;
5326 // Otherwise we can't safely widen the elements used in this shuffle.
5329 assert(WidenedMask.size() == Mask.size() / 2 &&
5330 "Incorrect size of mask after widening the elements!");
5335 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5336 const APInt &Zeroable,
5337 SmallVectorImpl<int> &WidenedMask) {
5338 SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
5339 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
5340 if (TargetMask[i] == SM_SentinelUndef)
5343 TargetMask[i] = SM_SentinelZero;
5345 return canWidenShuffleElements(TargetMask, WidenedMask);
5348 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5349 SmallVector<int, 32> WidenedMask;
5350 return canWidenShuffleElements(Mask, WidenedMask);
5353 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
5354 bool X86::isZeroNode(SDValue Elt) {
5355 return isNullConstant(Elt) || isNullFPConstant(Elt);
5358 // Build a vector of constants.
5359 // Use an UNDEF node if MaskElt == -1.
5360 // Split 64-bit constants in 32-bit mode.
5361 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5362 const SDLoc &dl, bool IsMask = false) {
5364 SmallVector<SDValue, 32> Ops;
5367 MVT ConstVecVT = VT;
5368 unsigned NumElts = VT.getVectorNumElements();
5369 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5370 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5371 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5375 MVT EltVT = ConstVecVT.getVectorElementType();
5376 for (unsigned i = 0; i < NumElts; ++i) {
5377 bool IsUndef = Values[i] < 0 && IsMask;
5378 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5379 DAG.getConstant(Values[i], dl, EltVT);
5380 Ops.push_back(OpNode);
5382 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5383 DAG.getConstant(0, dl, EltVT));
5385 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5387 ConstsNode = DAG.getBitcast(VT, ConstsNode);
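5388 // A hypothetical call (sketch): getConstVector({1, -1, 3, 0}, MVT::v4i32,
5389 // DAG, dl, /*IsMask=*/true) produces the build_vector
5390 // <i32 1, undef, i32 3, i32 0>.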
5391 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5392 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5393 assert(Bits.size() == Undefs.getBitWidth() &&
5394 "Unequal constant and undef arrays");
5395 SmallVector<SDValue, 32> Ops;
5398 MVT ConstVecVT = VT;
5399 unsigned NumElts = VT.getVectorNumElements();
5400 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5401 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5402 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5406 MVT EltVT = ConstVecVT.getVectorElementType();
5407 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5409 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5412 const APInt &V = Bits[i];
5413 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5415 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5416 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5417 } else if (EltVT == MVT::f32) {
5418 APFloat FV(APFloat::IEEEsingle(), V);
5419 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5420 } else if (EltVT == MVT::f64) {
5421 APFloat FV(APFloat::IEEEdouble(), V);
5422 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5424 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5428 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5429 return DAG.getBitcast(VT, ConstsNode);
5432 /// Returns a vector of specified type with all zero elements.
5433 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5434 SelectionDAG &DAG, const SDLoc &dl) {
5435 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5436 VT.getVectorElementType() == MVT::i1) &&
5437 "Unexpected vector type");
5439 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5440 // type. This ensures they get CSE'd. But if the integer type is not
5441 // available, use a floating-point +0.0 instead.
5443 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5444 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5445 } else if (VT.isFloatingPoint()) {
5446 Vec = DAG.getConstantFP(+0.0, dl, VT);
5447 } else if (VT.getVectorElementType() == MVT::i1) {
5448 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5449 "Unexpected vector type");
5450 Vec = DAG.getConstant(0, dl, VT);
5452 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5453 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5455 return DAG.getBitcast(VT, Vec);
5458 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5459 const SDLoc &dl, unsigned vectorWidth) {
5460 EVT VT = Vec.getValueType();
5461 EVT ElVT = VT.getVectorElementType();
5462 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5463 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5464 VT.getVectorNumElements()/Factor);
5466 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5467 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5468 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5470 // This is the index of the first element of the vectorWidth-bit chunk
5471 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
5472 IdxVal &= ~(ElemsPerChunk - 1);
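5473 // (e.g. with ElemsPerChunk = 4, IdxVal = 5 is rounded down to 4.)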
5474 // If the input is a buildvector just emit a smaller one.
5475 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5476 return DAG.getBuildVector(ResultVT, dl,
5477 Vec->ops().slice(IdxVal, ElemsPerChunk));
5479 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5480 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5483 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5484 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5485 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5486 /// instructions or a simple subregister reference. Idx is an index in the
5487 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5488 /// lowering EXTRACT_VECTOR_ELT operations easier.
5489 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5490 SelectionDAG &DAG, const SDLoc &dl) {
5491 assert((Vec.getValueType().is256BitVector() ||
5492 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5493 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5496 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5497 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5498 SelectionDAG &DAG, const SDLoc &dl) {
5499 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5500 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5503 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5504 SelectionDAG &DAG, const SDLoc &dl,
5505 unsigned vectorWidth) {
5506 assert((vectorWidth == 128 || vectorWidth == 256) &&
5507 "Unsupported vector width");
5508 // Inserting an UNDEF subvector just yields Result unchanged.
5511 EVT VT = Vec.getValueType();
5512 EVT ElVT = VT.getVectorElementType();
5513 EVT ResultVT = Result.getValueType();
5515 // Insert the relevant vectorWidth bits.
5516 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5517 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5519 // This is the index of the first element of the vectorWidth-bit chunk
5520 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
5521 IdxVal &= ~(ElemsPerChunk - 1);
5523 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5524 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5527 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5528 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5529 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5530 /// simple superregister reference. Idx is an index in the 128 bits
5531 /// we want. It need not be aligned to a 128-bit boundary. That makes
5532 /// lowering INSERT_VECTOR_ELT operations easier.
5533 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5534 SelectionDAG &DAG, const SDLoc &dl) {
5535 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5536 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5539 /// Widen a vector to a larger size with the same scalar type, with the new
5540 /// elements either zero or undef.
5541 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5542 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5544 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5545 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5546 "Unsupported vector widening type");
5547 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5549 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5550 DAG.getIntPtrConstant(0, dl));
5553 /// Widen a vector to a larger size with the same scalar type, with the new
5554 /// elements either zero or undef.
5555 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5556 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5557 const SDLoc &dl, unsigned WideSizeInBits) {
5558 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5559 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5560 "Unsupported vector widening type");
5561 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5562 MVT SVT = Vec.getSimpleValueType().getScalarType();
5563 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5564 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5567 // Helper function to collect subvector ops that are concatenated together,
5568 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
5569 // The subvectors in Ops are guaranteed to be the same type.
5570 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5571 assert(Ops.empty() && "Expected an empty ops vector");
5573 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5574 Ops.append(N->op_begin(), N->op_end());
5578 if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5579 isa<ConstantSDNode>(N->getOperand(2))) {
5580 SDValue Src = N->getOperand(0);
5581 SDValue Sub = N->getOperand(1);
5582 const APInt &Idx = N->getConstantOperandAPInt(2);
5583 EVT VT = Src.getValueType();
5584 EVT SubVT = Sub.getValueType();
5586 // TODO - Handle more general insert_subvector chains.
5587 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5588 Idx == (VT.getVectorNumElements() / 2) &&
5589 Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5590 Src.getOperand(1).getValueType() == SubVT &&
5591 isNullConstant(Src.getOperand(2))) {
5592 Ops.push_back(Src.getOperand(1));
5601 // Helper for splitting operands of an operation to legal target size and
5602 // applying a function on each part.
5603 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5604 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5605 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5606 // The argument Builder is a function that will be applied on each split part:
5607 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
5608 template <typename F>
5609 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5610 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5611 F Builder, bool CheckBWI = true) {
5612 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5613 unsigned NumSubs = 1;
5614 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5615 (!CheckBWI && Subtarget.useAVX512Regs())) {
5616 if (VT.getSizeInBits() > 512) {
5617 NumSubs = VT.getSizeInBits() / 512;
5618 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5620 } else if (Subtarget.hasAVX2()) {
5621 if (VT.getSizeInBits() > 256) {
5622 NumSubs = VT.getSizeInBits() / 256;
5623 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5626 if (VT.getSizeInBits() > 128) {
5627 NumSubs = VT.getSizeInBits() / 128;
5628 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5633 return Builder(DAG, DL, Ops);
5635 SmallVector<SDValue, 4> Subs;
5636 for (unsigned i = 0; i != NumSubs; ++i) {
5637 SmallVector<SDValue, 2> SubOps;
5638 for (SDValue Op : Ops) {
5639 EVT OpVT = Op.getValueType();
5640 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5641 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5642 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5644 Subs.push_back(Builder(DAG, DL, SubOps));
5646 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
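5647 // Sketch of a typical Builder (e.g. one that emits a per-split X86ISD::AVG):
5648 //   [](SelectionDAG &DAG, const SDLoc &DL, ArrayRef<SDValue> Ops) { return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops); }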
5649 /// Insert an i1-subvector into an i1-vector.
5650 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5651 const X86Subtarget &Subtarget) {
5654 SDValue Vec = Op.getOperand(0);
5655 SDValue SubVec = Op.getOperand(1);
5656 SDValue Idx = Op.getOperand(2);
5658 if (!isa<ConstantSDNode>(Idx))
5661 // Inserting undef is a nop. We can just return the original vector.
5662 if (SubVec.isUndef())
5665 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5666 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5669 MVT OpVT = Op.getSimpleValueType();
5670 unsigned NumElems = OpVT.getVectorNumElements();
5672 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5674 // Extend to natively supported kshift.
5675 MVT WideOpVT = OpVT;
5676 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5677 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5679 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
5680 // if needed.
5681 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5682 // May need to promote to a legal type.
5683 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5684 DAG.getConstant(0, dl, WideOpVT),
5686 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5689 MVT SubVecVT = SubVec.getSimpleValueType();
5690 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5692 assert(IdxVal + SubVecNumElems <= NumElems &&
5693 IdxVal % SubVecVT.getSizeInBits() == 0 &&
5694 "Unexpected index value in INSERT_SUBVECTOR");
5696 SDValue Undef = DAG.getUNDEF(WideOpVT);
5699 // Zero lower bits of the Vec
5700 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
5701 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5703 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5704 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5705 // Merge them together, SubVec should be zero extended.
5706 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5707 DAG.getConstant(0, dl, WideOpVT),
5709 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5710 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5713 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5714 Undef, SubVec, ZeroIdx);
5716 if (Vec.isUndef()) {
5717 assert(IdxVal != 0 && "Unexpected index");
5718 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5719 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5720 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5723 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5724 assert(IdxVal != 0 && "Unexpected index");
5725 NumElems = WideOpVT.getVectorNumElements();
5726 unsigned ShiftLeft = NumElems - SubVecNumElems;
5727 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5728 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5729 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5730 if (ShiftRight != 0)
5731 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5732 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5733 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5736 // Simple case when we put the subvector in the upper part.
5737 if (IdxVal + SubVecNumElems == NumElems) {
5738 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5739 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5740 if (SubVecNumElems * 2 == NumElems) {
5741 // Special case, use legal zero extending insert_subvector. This allows
5742 // isel to optimize when bits are known zero.
5743 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5744 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5745 DAG.getConstant(0, dl, WideOpVT),
5748 // Otherwise use explicit shifts to zero the bits.
5749 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5750 Undef, Vec, ZeroIdx);
5751 NumElems = WideOpVT.getVectorNumElements();
5752 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
5753 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5754 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5756 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5757 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5760 // Inserting into the middle is more complicated.
5762 NumElems = WideOpVT.getVectorNumElements();
5764 // Widen the vector if needed.
5765 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5767 // Clear the upper bits of the subvector and move it to its insert position.
5768 unsigned ShiftLeft = NumElems - SubVecNumElems;
5769 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5770 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5771 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5772 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5773 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5775 // Isolate the bits below the insertion point.
5776 unsigned LowShift = NumElems - IdxVal;
5777 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
5778 DAG.getTargetConstant(LowShift, dl, MVT::i8));
5779 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
5780 DAG.getTargetConstant(LowShift, dl, MVT::i8));
5782 // Isolate the bits after the last inserted bit.
5783 unsigned HighShift = IdxVal + SubVecNumElems;
5784 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5785 DAG.getTargetConstant(HighShift, dl, MVT::i8));
5786 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
5787 DAG.getTargetConstant(HighShift, dl, MVT::i8));
5789 // Now OR all 3 pieces together.
5790 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
5791 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
5793 // Reduce to original width if needed.
5794 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
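5795 // e.g. inserting a v4i1 at index 4 of a v16i1: ShiftLeft = 12, ShiftRight = 8
5796 // place the subvector at bits 4..7; Low keeps bits 0..3, High keeps bits 8..15.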
5797 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
5799 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
5800 EVT SubVT = V1.getValueType();
5801 EVT SubSVT = SubVT.getScalarType();
5802 unsigned SubNumElts = SubVT.getVectorNumElements();
5803 unsigned SubVectorWidth = SubVT.getSizeInBits();
5804 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
5805 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
5806 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
5809 /// Returns a vector of specified type with all bits set.
5810 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5811 /// Then bitcast to their original type, ensuring they get CSE'd.
5812 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5813 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5814 "Expected a 128/256/512-bit vector type");
5816 APInt Ones = APInt::getAllOnesValue(32);
5817 unsigned NumElts = VT.getSizeInBits() / 32;
5818 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5819 return DAG.getBitcast(VT, Vec);
5822 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
5823 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
5825 case ISD::ANY_EXTEND:
5826 case ISD::ANY_EXTEND_VECTOR_INREG:
5827 return ISD::ANY_EXTEND_VECTOR_INREG;
5828 case ISD::ZERO_EXTEND:
5829 case ISD::ZERO_EXTEND_VECTOR_INREG:
5830 return ISD::ZERO_EXTEND_VECTOR_INREG;
5831 case ISD::SIGN_EXTEND:
5832 case ISD::SIGN_EXTEND_VECTOR_INREG:
5833 return ISD::SIGN_EXTEND_VECTOR_INREG;
5835 llvm_unreachable("Unknown opcode");
5838 static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
5839 SDValue In, SelectionDAG &DAG) {
5840 EVT InVT = In.getValueType();
5841 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
5842 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
5843 ISD::ZERO_EXTEND == Opcode) &&
5844 "Unknown extension opcode");
5846 // For 256-bit vectors, we only need the lower (128-bit) input half.
5847 // For 512-bit vectors, we only need the lower input half or quarter.
5848 if (InVT.getSizeInBits() > 128) {
5849 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
5850 "Expected VTs to be the same size!");
5851 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
5852 In = extractSubVector(In, 0, DAG, DL,
5853 std::max(128U, VT.getSizeInBits() / Scale));
5854 InVT = In.getValueType();
5857 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
5858 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
5860 return DAG.getNode(Opcode, DL, VT, In);
5863 // Match (xor X, -1) -> X.
5864 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
5865 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
5866 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
5867 V = peekThroughBitcasts(V);
5868 if (V.getOpcode() == ISD::XOR &&
5869 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
5870 return V.getOperand(0);
5871 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5872 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
5873 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
5874 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
5875 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
5876 Not, V.getOperand(1));
5879 SmallVector<SDValue, 2> CatOps;
5880 if (collectConcatOps(V.getNode(), CatOps)) {
5881 for (SDValue &CatOp : CatOps) {
5882 SDValue NotCat = IsNOT(CatOp, DAG);
5883 if (!NotCat) return SDValue();
5884 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
5886 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
5891 /// Returns a vector_shuffle node for an unpackl operation.
5892 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5893 SDValue V1, SDValue V2) {
5894 SmallVector<int, 8> Mask;
5895 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
5896 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5899 /// Returns a vector_shuffle node for an unpackh operation.
5900 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5901 SDValue V1, SDValue V2) {
5902 SmallVector<int, 8> Mask;
5903 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
5904 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5907 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
5908 /// This produces a shuffle where the low element of V2 is swizzled into the
5909 /// zero/undef vector, landing at element Idx.
5910 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5911 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
5913 const X86Subtarget &Subtarget,
5914 SelectionDAG &DAG) {
5915 MVT VT = V2.getSimpleValueType();
5917 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5918 int NumElems = VT.getVectorNumElements();
5919 SmallVector<int, 16> MaskVec(NumElems);
5920 for (int i = 0; i != NumElems; ++i)
5921 // If this is the insertion idx, put the low elt of V2 here.
5922 MaskVec[i] = (i == Idx) ? NumElems : i;
5923 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
  if (!Load || !ISD::isNormalLoad(Load))
    return nullptr;

  SDValue Ptr = Load->getBasePtr();
  if (Ptr->getOpcode() == X86ISD::Wrapper ||
      Ptr->getOpcode() == X86ISD::WrapperRIP)
    Ptr = Ptr->getOperand(0);

  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
  if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
    return nullptr;

  return CNode->getConstVal();
}

static const Constant *getTargetConstantFromNode(SDValue Op) {
  Op = peekThroughBitcasts(Op);
  return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
}

const Constant *
X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
  assert(LD && "Unexpected null LoadSDNode");
  return getTargetConstantFromNode(LD);
}
// Extract raw constant bits from constant pools.
static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
                                          APInt &UndefElts,
                                          SmallVectorImpl<APInt> &EltBits,
                                          bool AllowWholeUndefs = true,
                                          bool AllowPartialUndefs = true) {
  assert(EltBits.empty() && "Expected an empty EltBits vector");

  Op = peekThroughBitcasts(Op);

  EVT VT = Op.getValueType();
  unsigned SizeInBits = VT.getSizeInBits();
  assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
  unsigned NumElts = SizeInBits / EltSizeInBits;

  // Bitcast a source array of element bits to the target size.
  auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
    unsigned NumSrcElts = UndefSrcElts.getBitWidth();
    unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
    assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
           "Constant bit sizes don't match");

    // Don't split if we don't allow undef bits.
    bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
    if (UndefSrcElts.getBoolValue() && !AllowUndefs)
      return false;

    // If we're already the right size, don't bother bitcasting.
    if (NumSrcElts == NumElts) {
      UndefElts = UndefSrcElts;
      EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
      return true;
    }

    // Extract all the undef/constant element data and pack into single bitsets.
    APInt UndefBits(SizeInBits, 0);
    APInt MaskBits(SizeInBits, 0);

    for (unsigned i = 0; i != NumSrcElts; ++i) {
      unsigned BitOffset = i * SrcEltSizeInBits;
      if (UndefSrcElts[i])
        UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
      MaskBits.insertBits(SrcEltBits[i], BitOffset);
    }

    // Split the undef/constant single bitset data into the target elements.
    UndefElts = APInt(NumElts, 0);
    EltBits.resize(NumElts, APInt(EltSizeInBits, 0));

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned BitOffset = i * EltSizeInBits;
      APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);

      // Only treat an element as UNDEF if all bits are UNDEF.
      if (UndefEltBits.isAllOnesValue()) {
        if (!AllowWholeUndefs)
          return false;
        UndefElts.setBit(i);
        continue;
      }

      // If only some bits are UNDEF then treat them as zero (or bail if not
      // supported).
      if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
        return false;

      EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
    }
    return true;
  };
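  // CastBitData in a nutshell (illustrative example): re-slicing a single
  // v1i64 constant 0x00000001FFFFFFFF at 32-bit granularity yields the two
  // little-endian elements {0xFFFFFFFF, 0x00000001}, with per-element undef
  // bits tracked in UndefElts.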
  // Collect constant bits and insert into mask/undef bit masks.
  auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
                                unsigned UndefBitIndex) {
    if (!Cst)
      return false;
    if (isa<UndefValue>(Cst)) {
      Undefs.setBit(UndefBitIndex);
      return true;
    }
    if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
      Mask = CInt->getValue();
      return true;
    }
    if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
      Mask = CFP->getValueAPF().bitcastToAPInt();
      return true;
    }
    return false;
  };

  // Handle UNDEFs.
  if (Op.isUndef()) {
    APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
    SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  // Extract scalar constant bits.
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
    APInt UndefSrcElts = APInt::getNullValue(1);
    SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
    APInt UndefSrcElts = APInt::getNullValue(1);
    APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
    SmallVector<APInt, 64> SrcEltBits(1, RawBits);
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from build vector.
  if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      const SDValue &Src = Op.getOperand(i);
      if (Src.isUndef()) {
        UndefSrcElts.setBit(i);
        continue;
      }
      auto *Cst = cast<ConstantSDNode>(Src);
      SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
    }
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      const SDValue &Src = Op.getOperand(i);
      if (Src.isUndef()) {
        UndefSrcElts.setBit(i);
        continue;
      }
      auto *Cst = cast<ConstantFPSDNode>(Src);
      APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
      SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
    }
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  // Extract constant bits from constant pool vector.
  if (auto *Cst = getTargetConstantFromNode(Op)) {
    Type *CstTy = Cst->getType();
    unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
    if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
      return false;

    unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0; i != NumSrcElts; ++i)
      if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
                               UndefSrcElts, i))
        return false;

    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from a broadcasted constant pool scalar.
  if (Op.getOpcode() == X86ISD::VBROADCAST &&
      EltSizeInBits <= VT.getScalarSizeInBits()) {
    if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
      unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

      APInt UndefSrcElts(NumSrcElts, 0);
      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
      if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
        if (UndefSrcElts[0])
          UndefSrcElts.setBits(0, NumSrcElts);
        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
        return CastBitData(UndefSrcElts, SrcEltBits);
      }
    }
  }
  // Extract constant bits from a broadcasted constant pool load.
  if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
      EltSizeInBits <= VT.getScalarSizeInBits()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
    if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
      return false;

    SDValue Ptr = MemIntr->getBasePtr();
    if (Ptr->getOpcode() == X86ISD::Wrapper ||
        Ptr->getOpcode() == X86ISD::WrapperRIP)
      Ptr = Ptr->getOperand(0);

    auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
    if (!CNode || CNode->isMachineConstantPoolEntry() ||
        CNode->getOffset() != 0)
      return false;

    if (const Constant *C = CNode->getConstVal()) {
      unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

      APInt UndefSrcElts(NumSrcElts, 0);
      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
      if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
        if (UndefSrcElts[0])
          UndefSrcElts.setBits(0, NumSrcElts);
        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
        return CastBitData(UndefSrcElts, SrcEltBits);
      }
    }
  }
  // Extract constant bits from a subvector broadcast.
  if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
    SmallVector<APInt, 16> SubEltBits;
    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, SubEltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      UndefElts = APInt::getSplat(NumElts, UndefElts);
      while (EltBits.size() < NumElts)
        EltBits.append(SubEltBits.begin(), SubEltBits.end());
      return true;
    }
  }

  // Extract a rematerialized scalar constant insertion.
  if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
      Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits;
    auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
    SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
    SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  // Insert constant bits from base and sub-vector sources.
  if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
      isa<ConstantSDNode>(Op.getOperand(2))) {
    // TODO - support insert_subvector through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    APInt UndefSubElts;
    SmallVector<APInt, 32> EltSubBits;
    if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
                                      UndefSubElts, EltSubBits,
                                      AllowWholeUndefs, AllowPartialUndefs) &&
        getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, EltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      unsigned BaseIdx = Op.getConstantOperandVal(2);
      UndefElts.insertBits(UndefSubElts, BaseIdx);
      for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
        EltBits[BaseIdx + i] = EltSubBits[i];
      return true;
    }
  }

  // Extract constant bits from a subvector's source.
  if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      isa<ConstantSDNode>(Op.getOperand(1))) {
    // TODO - support extract_subvector through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, EltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      EVT SrcVT = Op.getOperand(0).getValueType();
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      unsigned NumSubElts = VT.getVectorNumElements();
      unsigned BaseIdx = Op.getConstantOperandVal(1);
      UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
      if ((BaseIdx + NumSubElts) != NumSrcElts)
        EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
      if (BaseIdx != 0)
        EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
      return true;
    }
  }
  // Extract constant bits from shuffle node sources.
  if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
    // TODO - support shuffle through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    ArrayRef<int> Mask = SVN->getMask();
    if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
        llvm::any_of(Mask, [](int M) { return M < 0; }))
      return false;

    APInt UndefElts0, UndefElts1;
    SmallVector<APInt, 32> EltBits0, EltBits1;
    if (isAnyInRange(Mask, 0, NumElts) &&
        !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                       UndefElts0, EltBits0, AllowWholeUndefs,
                                       AllowPartialUndefs))
      return false;
    if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
        !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
                                       UndefElts1, EltBits1, AllowWholeUndefs,
                                       AllowPartialUndefs))
      return false;

    UndefElts = APInt::getNullValue(NumElts);
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        EltBits.push_back(APInt::getNullValue(EltSizeInBits));
      } else if (M < (int)NumElts) {
        if (UndefElts0[M])
          UndefElts.setBit(i);
        EltBits.push_back(EltBits0[M]);
      } else {
        if (UndefElts1[M - NumElts])
          UndefElts.setBit(i);
        EltBits.push_back(EltBits1[M - NumElts]);
      }
    }
    return true;
  }

  return false;
}
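// Usage sketch (illustrative only): callers typically re-slice a vector
// constant at whatever granularity they care about, e.g. per byte:
//   APInt UndefElts;
//   SmallVector<APInt, 64> EltBits;
//   if (getTargetConstantBitsFromNode(V, 8, UndefElts, EltBits))
//     ; // EltBits now holds one 8-bit APInt per byte of V.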
namespace llvm {
namespace X86 {
bool isConstantSplat(SDValue Op, APInt &SplatVal) {
  APInt UndefElts;
  SmallVector<APInt, 16> EltBits;
  if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
                                    UndefElts, EltBits, true, false)) {
    int SplatIndex = -1;
    for (int i = 0, e = EltBits.size(); i != e; ++i) {
      if (UndefElts[i])
        continue;
      if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
        SplatIndex = -1;
        break;
      }
      SplatIndex = i;
    }
    if (0 <= SplatIndex) {
      SplatVal = EltBits[SplatIndex];
      return true;
    }
  }

  return false;
}
} // namespace X86
} // namespace llvm
static bool getTargetShuffleMaskIndices(SDValue MaskNode,
                                        unsigned MaskEltSizeInBits,
                                        SmallVectorImpl<uint64_t> &RawMask,
                                        APInt &UndefElts) {
  // Extract the raw target constant bits.
  SmallVector<APInt, 64> EltBits;
  if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
                                     EltBits, /* AllowWholeUndefs */ true,
                                     /* AllowPartialUndefs */ false))
    return false;

  // Insert the extracted elements into the mask.
  for (APInt Elt : EltBits)
    RawMask.push_back(Elt.getZExtValue());

  return true;
}
/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
/// Note: This ignores saturation, so inputs must be checked first.
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                  bool Unary) {
  assert(Mask.empty() && "Expected an empty shuffle mask vector");
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
  unsigned Offset = Unary ? 0 : NumElts;

  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
      Mask.push_back(Elt + (Lane * NumEltsPerLane));
    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
      Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
  }
}
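// For example (illustrative): a binary pack to MVT::v16i8 produces the mask
// {0,2,4,6,8,10,12,14, 16,18,20,22,24,26,28,30} - the even (low) bytes of
// each 16-bit element of both inputs, which is exactly a truncating
// PACKSSWB/PACKUSWB when no saturation occurs.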
// Split the demanded elts of a PACKSS/PACKUS node between its operands.
static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
                                APInt &DemandedLHS, APInt &DemandedRHS) {
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumInnerElts = NumElts / 2;
  int NumEltsPerLane = NumElts / NumLanes;
  int NumInnerEltsPerLane = NumInnerElts / NumLanes;

  DemandedLHS = APInt::getNullValue(NumInnerElts);
  DemandedRHS = APInt::getNullValue(NumInnerElts);

  // Map DemandedElts to the packed operands.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
      int OuterIdx = (Lane * NumEltsPerLane) + Elt;
      int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
      if (DemandedElts[OuterIdx])
        DemandedLHS.setBit(InnerIdx);
      if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
        DemandedRHS.setBit(InnerIdx);
    }
  }
}
// Split the demanded elts of a HADD/HSUB node between its operands.
static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
                                 APInt &DemandedLHS, APInt &DemandedRHS) {
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getNullValue(NumElts);
  DemandedRHS = APInt::getNullValue(NumElts);

  // Map DemandedElts to the horizontal operands.
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
    }
  }
}
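// For example (illustrative): for a v4i32 HADD, demanding only output
// element 1 maps to LHS elements {2,3} (whose sum produces it), while
// demanding output element 2 maps to RHS elements {0,1}.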
/// Calculates the shuffle mask corresponding to the target-specific opcode.
/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
/// operands in \p Ops, and returns true.
/// Sets \p IsUnary to true if only one source is used. Note that this will set
/// IsUnary for shuffles which use a single input multiple times, and in those
/// cases it will adjust the mask to only have indices within that single input.
/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
                                 SmallVectorImpl<SDValue> &Ops,
                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
  unsigned NumElems = VT.getVectorNumElements();
  unsigned MaskEltSize = VT.getScalarSizeInBits();
  SmallVector<uint64_t, 32> RawMask;
  APInt RawUndefs;
  SDValue ImmN;

  assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
  assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");

  IsUnary = false;
  bool IsFakeUnary = false;
  switch (N->getOpcode()) {
  case X86ISD::BLENDI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::SHUFP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeSHUFPMask(NumElems, MaskEltSize,
                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::INSERTPS:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::EXTRQI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    if (isa<ConstantSDNode>(N->getOperand(1)) &&
        isa<ConstantSDNode>(N->getOperand(2))) {
      int BitLen = N->getConstantOperandVal(1);
      int BitIdx = N->getConstantOperandVal(2);
      DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
      IsUnary = true;
    }
    break;
  case X86ISD::INSERTQI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    if (isa<ConstantSDNode>(N->getOperand(2)) &&
        isa<ConstantSDNode>(N->getOperand(3))) {
      int BitLen = N->getConstantOperandVal(2);
      int BitIdx = N->getConstantOperandVal(3);
      DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
      IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    }
    break;
  case X86ISD::UNPCKH:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::UNPCKL:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::MOVHLPS:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeMOVHLPSMask(NumElems, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::MOVLHPS:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeMOVLHPSMask(NumElems, Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::PALIGNR:
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    Ops.push_back(N->getOperand(1));
    Ops.push_back(N->getOperand(0));
    break;
  case X86ISD::VSHLDQ:
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                     Mask);
    IsUnary = true;
    break;
  case X86ISD::VSRLDQ:
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                     Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFD:
  case X86ISD::VPERMILPI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSHUFMask(NumElems, MaskEltSize,
                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFHW:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      Mask);
    IsUnary = true;
    break;
  case X86ISD::PSHUFLW:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                      Mask);
    IsUnary = true;
    break;
  case X86ISD::VZEXT_MOVL:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeZeroMoveLowMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::VBROADCAST: {
    SDValue N0 = N->getOperand(0);
    // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
    // add the pre-extracted value to the Ops vector.
    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N0.getOperand(0).getValueType() == VT &&
        N0.getConstantOperandVal(1) == 0)
      Ops.push_back(N0.getOperand(0));

    // We only decode broadcasts of same-sized vectors, unless the broadcast
    // came from an extract from the original width. If we found one, we
    // pushed it to the Ops vector above.
    if (N0.getValueType() == VT || !Ops.empty()) {
      DecodeVectorBroadcast(NumElems, Mask);
      IsUnary = true;
      break;
    }
    return false;
  }
  case X86ISD::VPERMILPV: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
    SDValue MaskNode = N->getOperand(1);
    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                    RawUndefs)) {
      DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::PSHUFB: {
    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
    SDValue MaskNode = N->getOperand(1);
    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
      DecodePSHUFBMask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::VPERMI:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
    break;
  case X86ISD::VPERM2X128:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
                         Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::SHUF128:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    ImmN = N->getOperand(N->getNumOperands() - 1);
    decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
                              cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    break;
  case X86ISD::MOVSLDUP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeMOVSLDUPMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVSHDUP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeMOVSHDUPMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::MOVDDUP:
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    DecodeMOVDDUPMask(NumElems, Mask);
    IsUnary = true;
    break;
  case X86ISD::VPERMIL2: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    SDValue MaskNode = N->getOperand(2);
    SDValue CtrlNode = N->getOperand(3);
    if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
      unsigned CtrlImm = CtrlOp->getZExtValue();
      if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                      RawUndefs)) {
        DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
                            Mask);
        break;
      }
    }
    return false;
  }
  case X86ISD::VPPERM: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
    SDValue MaskNode = N->getOperand(2);
    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
      DecodeVPPERMMask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::VPERMV: {
    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
    IsUnary = true;
    // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
    Ops.push_back(N->getOperand(1));
    SDValue MaskNode = N->getOperand(0);
    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                    RawUndefs)) {
      DecodeVPERMVMask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  case X86ISD::VPERMV3: {
    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
    assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
    // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
    Ops.push_back(N->getOperand(0));
    Ops.push_back(N->getOperand(2));
    SDValue MaskNode = N->getOperand(1);
    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
                                    RawUndefs)) {
      DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
      break;
    }
    return false;
  }
  default: llvm_unreachable("unknown target shuffle node");
  }

  // Empty mask indicates the decode failed.
  if (Mask.empty())
    return false;

  // Check if we're getting a shuffle mask with zero'd elements.
  if (!AllowSentinelZero)
    if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
      return false;

  // If we have a fake unary shuffle, the shuffle mask is spread across two
  // inputs that are actually the same node. Re-map the mask to always point
  // into the first input.
  if (IsFakeUnary)
    for (int &M : Mask)
      if (M >= (int)Mask.size())
        M -= Mask.size();

  // If we didn't already add operands in the opcode-specific code, default to
  // adding 1 or 2 operands starting at 0.
  if (Ops.empty()) {
    Ops.push_back(N->getOperand(0));
    if (!IsUnary || IsFakeUnary)
      Ops.push_back(N->getOperand(1));
  }

  return true;
}
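// Fake-unary example (illustrative): an X86ISD::UNPCKL v4i32 whose two
// operands are the same node first decodes to {0,4,1,5}; the remap above
// turns it into the unary mask {0,0,1,1} over a single input.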
/// Decode a target shuffle mask and inputs and see if any values are
/// known to be undef or zero from their inputs.
/// Returns true if the target shuffle mask was decoded.
static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
                                         SmallVectorImpl<SDValue> &Ops,
                                         APInt &KnownUndef, APInt &KnownZero) {
  bool IsUnary;
  if (!isTargetShuffle(N.getOpcode()))
    return false;

  MVT VT = N.getSimpleValueType();
  if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
    return false;

  int Size = Mask.size();
  SDValue V1 = Ops[0];
  SDValue V2 = IsUnary ? V1 : Ops[1];
  KnownUndef = KnownZero = APInt::getNullValue(Size);

  V1 = peekThroughBitcasts(V1);
  V2 = peekThroughBitcasts(V2);

  assert((VT.getSizeInBits() % Mask.size()) == 0 &&
         "Illegal split of shuffle value type");
  unsigned EltSizeInBits = VT.getSizeInBits() / Size;

  // Extract known constant input data.
  APInt UndefSrcElts[2];
  SmallVector<APInt, 32> SrcEltBits[2];
  bool IsSrcConstant[2] = {
      getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
                                    SrcEltBits[0], true, false),
      getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
                                    SrcEltBits[1], true, false)};

  for (int i = 0; i < Size; ++i) {
    int M = Mask[i];

    // Already decoded as SM_SentinelZero / SM_SentinelUndef.
    if (M < 0) {
      assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
      if (SM_SentinelUndef == M)
        KnownUndef.setBit(i);
      if (SM_SentinelZero == M)
        KnownZero.setBit(i);
      continue;
    }

    // Determine shuffle input and normalize the mask.
    unsigned SrcIdx = M / Size;
    SDValue V = M < Size ? V1 : V2;
    M %= Size;

    // We are referencing an UNDEF input.
    if (V.isUndef()) {
      KnownUndef.setBit(i);
      continue;
    }

    // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
    // TODO: We currently only set UNDEF for integer types - floats use the same
    // registers as vectors and many of the scalar folded loads rely on the
    // SCALAR_TO_VECTOR pattern.
    if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        (Size % V.getValueType().getVectorNumElements()) == 0) {
      int Scale = Size / V.getValueType().getVectorNumElements();
      int Idx = M / Scale;
      if (Idx != 0 && !VT.isFloatingPoint())
        KnownUndef.setBit(i);
      else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
        KnownZero.setBit(i);
      continue;
    }

    // Attempt to extract from the source's constant bits.
    if (IsSrcConstant[SrcIdx]) {
      if (UndefSrcElts[SrcIdx][M])
        KnownUndef.setBit(i);
      else if (SrcEltBits[SrcIdx][M] == 0)
        KnownZero.setBit(i);
    }
  }

  assert(VT.getVectorNumElements() == (unsigned)Size &&
         "Different mask size from vector size!");
  return true;
}
// Replace target shuffle mask elements with known undef/zero sentinels.
static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
                                              const APInt &KnownUndef,
                                              const APInt &KnownZero) {
  unsigned NumElts = Mask.size();
  assert(KnownUndef.getBitWidth() == NumElts &&
         KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");

  for (unsigned i = 0; i != NumElts; ++i) {
    if (KnownUndef[i])
      Mask[i] = SM_SentinelUndef;
    else if (KnownZero[i])
      Mask[i] = SM_SentinelZero;
  }
}
// Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
                                              APInt &KnownUndef,
                                              APInt &KnownZero) {
  unsigned NumElts = Mask.size();
  KnownUndef = KnownZero = APInt::getNullValue(NumElts);

  for (unsigned i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (SM_SentinelUndef == M)
      KnownUndef.setBit(i);
    if (SM_SentinelZero == M)
      KnownZero.setBit(i);
  }
}
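// For example (illustrative): Mask = {3, SM_SentinelZero, SM_SentinelUndef, 0}
// produces KnownZero = 0b0010 and KnownUndef = 0b0100 - the inverse of
// resolveTargetShuffleFromZeroables above.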
// Forward declaration (for getFauxShuffleMask recursive check).
// TODO: Use DemandedElts variant.
static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts);
// Attempt to decode ops that could be represented as a shuffle mask.
// The decoded shuffle mask may contain a different number of elements than
// the destination value type.
static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
                               SmallVectorImpl<int> &Mask,
                               SmallVectorImpl<SDValue> &Ops,
                               SelectionDAG &DAG, unsigned Depth,
                               bool ResolveKnownElts) {
  Mask.clear();
  Ops.clear();

  MVT VT = N.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumSizeInBits = VT.getSizeInBits();
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
    return false;
  assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");

  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::VECTOR_SHUFFLE: {
    // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
    if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
      Mask.append(ShuffleMask.begin(), ShuffleMask.end());
      Ops.push_back(N.getOperand(0));
      Ops.push_back(N.getOperand(1));
      return true;
    }
    return false;
  }
  case ISD::AND:
  case X86ISD::ANDNP: {
    // Attempt to decode as a per-byte mask.
    APInt UndefElts;
    SmallVector<APInt, 32> EltBits;
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    bool IsAndN = (X86ISD::ANDNP == Opcode);
    uint64_t ZeroMask = IsAndN ? 255 : 0;
    if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
      return false;
    for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
      if (UndefElts[i]) {
        Mask.push_back(SM_SentinelUndef);
        continue;
      }
      const APInt &ByteBits = EltBits[i];
      if (ByteBits != 0 && ByteBits != 255)
        return false;
      Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
    }
    Ops.push_back(IsAndN ? N1 : N0);
    return true;
  }
  case ISD::OR: {
    // Inspect each operand at the byte level. We can merge these into a
    // blend shuffle mask if for each byte at least one is masked out (zero).
    KnownBits Known0 =
        DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
    KnownBits Known1 =
        DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
    if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
      bool IsByteMask = true;
      unsigned NumSizeInBytes = NumSizeInBits / 8;
      unsigned NumBytesPerElt = NumBitsPerElt / 8;
      APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
      APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
      for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
        unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
        unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
        if (LHS == 255 && RHS == 0)
          SelectMask.setBit(i);
        else if (LHS == 255 && RHS == 255)
          ZeroMask.setBit(i);
        else if (!(LHS == 0 && RHS == 255))
          IsByteMask = false;
      }
      if (IsByteMask) {
        for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
          for (unsigned j = 0; j != NumBytesPerElt; ++j) {
            unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
            int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
            Mask.push_back(Idx);
          }
        }
        Ops.push_back(N.getOperand(0));
        Ops.push_back(N.getOperand(1));
        return true;
      }
    }
    // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
    // is a valid shuffle index.
    SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
    SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
    if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
      return false;
    SmallVector<int, 64> SrcMask0, SrcMask1;
    SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
    if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
                                true) ||
        !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
                                true))
      return false;
    size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
    SmallVector<int, 64> Mask0, Mask1;
    scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
    scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
    for (size_t i = 0; i != MaskSize; ++i) {
      if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
        Mask.push_back(SM_SentinelUndef);
      else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
        Mask.push_back(SM_SentinelZero);
      else if (Mask1[i] == SM_SentinelZero)
        Mask.push_back(Mask0[i]);
      else if (Mask0[i] == SM_SentinelZero)
        Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
      else
        return false;
    }
    Ops.append(SrcInputs0.begin(), SrcInputs0.end());
    Ops.append(SrcInputs1.begin(), SrcInputs1.end());
    return true;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Src = N.getOperand(0);
    SDValue Sub = N.getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    if (!isa<ConstantSDNode>(N.getOperand(2)) ||
        !N->isOnlyUserOf(Sub.getNode()))
      return false;
    uint64_t InsertIdx = N.getConstantOperandVal(2);
    // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
    if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Sub.getOperand(0).getValueType() == VT &&
        isa<ConstantSDNode>(Sub.getOperand(1))) {
      uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
      for (int i = 0; i != (int)NumElts; ++i)
        Mask.push_back(i);
      for (int i = 0; i != (int)NumSubElts; ++i)
        Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
      Ops.push_back(Src);
      Ops.push_back(Sub.getOperand(0));
      return true;
    }
    // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
    SmallVector<int, 64> SubMask;
    SmallVector<SDValue, 2> SubInputs;
    if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
                                SubMask, DAG, Depth + 1, ResolveKnownElts))
      return false;
    if (SubMask.size() != NumSubElts) {
      assert(((SubMask.size() % NumSubElts) == 0 ||
              (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
      if ((NumSubElts % SubMask.size()) == 0) {
        int Scale = NumSubElts / SubMask.size();
        SmallVector<int, 64> ScaledSubMask;
        scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
        SubMask = ScaledSubMask;
      } else {
        int Scale = SubMask.size() / NumSubElts;
        NumSubElts = SubMask.size();
        NumElts *= Scale;
        InsertIdx *= Scale;
      }
    }
    Ops.push_back(Src);
    for (SDValue &SubInput : SubInputs) {
      EVT SubSVT = SubInput.getValueType().getScalarType();
      EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
                                   NumSizeInBits / SubSVT.getSizeInBits());
      Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
                                DAG.getUNDEF(AltVT), SubInput,
                                DAG.getIntPtrConstant(0, SDLoc(N))));
    }
    for (int i = 0; i != (int)NumElts; ++i)
      Mask.push_back(i);
    for (int i = 0; i != (int)NumSubElts; ++i) {
      int M = SubMask[i];
      if (0 <= M) {
        int InputIdx = M / NumSubElts;
        M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
      }
      Mask[i + InsertIdx] = M;
    }
    return true;
  }
  case ISD::SCALAR_TO_VECTOR: {
    // Match against a scalar_to_vector of an extract from a vector,
    // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
    SDValue N0 = N.getOperand(0);
    SDValue SrcExtract;

    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
         N0.getOperand(0).getValueType() == VT) ||
        (N0.getOpcode() == X86ISD::PEXTRW &&
         N0.getOperand(0).getValueType() == MVT::v8i16) ||
        (N0.getOpcode() == X86ISD::PEXTRB &&
         N0.getOperand(0).getValueType() == MVT::v16i8)) {
      SrcExtract = N0;
    }

    if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
      return false;

    SDValue SrcVec = SrcExtract.getOperand(0);
    EVT SrcVT = SrcVec.getValueType();
    unsigned NumSrcElts = SrcVT.getVectorNumElements();
    unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;

    unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
    if (NumSrcElts <= SrcIdx)
      return false;

    Ops.push_back(SrcVec);
    Mask.push_back(SrcIdx);
    Mask.append(NumZeros, SM_SentinelZero);
    Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
    return true;
  }
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    SDValue InVec = N.getOperand(0);
    SDValue InScl = N.getOperand(1);
    SDValue InIndex = N.getOperand(2);
    if (!isa<ConstantSDNode>(InIndex) ||
        cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
      return false;
    uint64_t InIdx = N.getConstantOperandVal(2);

    // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
    if (X86::isZeroNode(InScl)) {
      Ops.push_back(InVec);
      for (unsigned i = 0; i != NumElts; ++i)
        Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
      return true;
    }

    // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
    // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
    unsigned ExOp =
        (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
    if (InScl.getOpcode() != ExOp)
      return false;

    SDValue ExVec = InScl.getOperand(0);
    SDValue ExIndex = InScl.getOperand(1);
    if (!isa<ConstantSDNode>(ExIndex) ||
        cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
      return false;
    uint64_t ExIdx = InScl.getConstantOperandVal(1);

    Ops.push_back(InVec);
    Ops.push_back(ExVec);
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
    return true;
  }
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
           N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
           "Unexpected input value type");

    APInt EltsLHS, EltsRHS;
    getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);

    // If we know input saturation won't happen we can treat this
    // as a truncation shuffle.
    if (Opcode == X86ISD::PACKSS) {
      if ((!N0.isUndef() &&
           DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
          (!N1.isUndef() &&
           DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
        return false;
    } else {
      APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
      if ((!N0.isUndef() &&
           !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
          (!N1.isUndef() &&
           !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
        return false;
    }

    bool IsUnary = (N0 == N1);

    Ops.push_back(N0);
    if (!IsUnary)
      Ops.push_back(N1);

    createPackShuffleMask(VT, Mask, IsUnary);
    return true;
  }
  case X86ISD::VSHLI:
  case X86ISD::VSRLI: {
    uint64_t ShiftVal = N.getConstantOperandVal(1);
    // Out of range bit shifts are guaranteed to be zero.
    if (NumBitsPerElt <= ShiftVal) {
      Mask.append(NumElts, SM_SentinelZero);
      return true;
    }

    // We can only decode 'whole byte' bit shifts as shuffles.
    if ((ShiftVal % 8) != 0)
      break;

    uint64_t ByteShift = ShiftVal / 8;
    unsigned NumBytes = NumSizeInBits / 8;
    unsigned NumBytesPerElt = NumBitsPerElt / 8;
    Ops.push_back(N.getOperand(0));

    // Clear mask to all zeros and insert the shifted byte indices.
    Mask.append(NumBytes, SM_SentinelZero);

    if (X86ISD::VSHLI == Opcode) {
      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
          Mask[i + j] = i + j - ByteShift;
    } else {
      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
          Mask[i + j - ByteShift] = i + j;
    }
    return true;
  }
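  // Byte-shift example (illustrative): a v2i64 VSRLI by 16 bits becomes the
  // byte mask {2,3,4,5,6,7,Z,Z, 10,11,12,13,14,15,Z,Z} (Z = SM_SentinelZero),
  // i.e. each 8-byte element shifted right by ByteShift = 2 bytes.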
  case X86ISD::VBROADCAST: {
    SDValue Src = N.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    if (!SrcVT.isVector())
      return false;

    if (NumSizeInBits != SrcVT.getSizeInBits()) {
      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
             "Illegal broadcast type");
      SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                               NumSizeInBits / SrcVT.getScalarSizeInBits());
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
                        DAG.getUNDEF(SrcVT), Src,
                        DAG.getIntPtrConstant(0, SDLoc(N)));
    }

    Ops.push_back(Src);
    Mask.append(NumElts, 0);
    return true;
  }
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    SDValue Src = N.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // Extended source must be a simple vector.
    if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
        (SrcVT.getScalarSizeInBits() % 8) != 0)
      return false;

    unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
    bool IsAnyExtend =
        (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
    DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
                         Mask);

    if (NumSizeInBits != SrcVT.getSizeInBits()) {
      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
             "Illegal zero-extension type");
      SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
                               NumSizeInBits / NumSrcBitsPerElt);
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
                        DAG.getUNDEF(SrcVT), Src,
                        DAG.getIntPtrConstant(0, SDLoc(N)));
    }

    Ops.push_back(Src);
    return true;
  }
  }

  return false;
}
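// For example (illustrative): a zero_extend_vector_inreg from v8i16 to v4i32
// is decoded as the v8i16-granularity faux shuffle mask {0,Z,1,Z,2,Z,3,Z}
// (Z = SM_SentinelZero), demonstrating that the returned mask may use more
// (narrower) elements than the destination type.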
/// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
                                              SmallVectorImpl<int> &Mask) {
  int MaskWidth = Mask.size();
  SmallVector<SDValue, 16> UsedInputs;
  for (int i = 0, e = Inputs.size(); i < e; ++i) {
    int lo = UsedInputs.size() * MaskWidth;
    int hi = lo + MaskWidth;

    // Strip UNDEF input usage.
    if (Inputs[i].isUndef())
      for (int &M : Mask)
        if ((lo <= M) && (M < hi))
          M = SM_SentinelUndef;

    // Check for unused inputs.
    if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
      for (int &M : Mask)
        if (lo <= M)
          M -= MaskWidth;
      continue;
    }

    // Check for repeated inputs.
    bool IsRepeat = false;
    for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
      if (UsedInputs[j] != Inputs[i])
        continue;
      for (int &M : Mask)
        if (lo <= M)
          M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
      IsRepeat = true;
      break;
    }
    if (IsRepeat)
      continue;

    UsedInputs.push_back(Inputs[i]);
  }
  Inputs = UsedInputs;
}
/// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
/// and then sets the SM_SentinelUndef and SM_SentinelZero values.
/// Returns true if the target shuffle mask was decoded.
static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
                                   SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   APInt &KnownUndef, APInt &KnownZero,
                                   SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts) {
  EVT VT = Op.getValueType();
  if (!VT.isSimple() || !VT.isVector())
    return false;

  if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
    if (ResolveKnownElts)
      resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
    return true;
  }
  if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
                         ResolveKnownElts)) {
    resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
    return true;
  }
  return false;
}
static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   SelectionDAG &DAG, unsigned Depth = 0,
                                   bool ResolveKnownElts = true) {
  EVT VT = Op.getValueType();
  if (!VT.isSimple() || !VT.isVector())
    return false;

  APInt KnownUndef, KnownZero;
  unsigned NumElts = Op.getValueType().getVectorNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
  return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
                                KnownZero, DAG, Depth, ResolveKnownElts);
}
/// Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
                                   unsigned Depth) {
  if (Depth == 6)
    return SDValue(); // Limit search depth.

  SDValue V = SDValue(N, 0);
  EVT VT = V.getValueType();
  unsigned Opcode = V.getOpcode();

  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
    int Elt = SV->getMaskElt(Index);

    if (Elt < 0)
      return DAG.getUNDEF(VT.getVectorElementType());

    unsigned NumElems = VT.getVectorNumElements();
    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
                                         : SV->getOperand(1);
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth + 1);
  }

  // Recurse into target specific vector shuffles to find scalars.
  if (isTargetShuffle(Opcode)) {
    MVT ShufVT = V.getSimpleValueType();
    MVT ShufSVT = ShufVT.getVectorElementType();
    int NumElems = (int)ShufVT.getVectorNumElements();
    SmallVector<int, 16> ShuffleMask;
    SmallVector<SDValue, 16> ShuffleOps;
    bool IsUnary;

    if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
      return SDValue();

    int Elt = ShuffleMask[Index];
    if (Elt == SM_SentinelZero)
      return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
                                 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
    if (Elt == SM_SentinelUndef)
      return DAG.getUNDEF(ShufSVT);

    assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
    SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
                               Depth + 1);
  }

  // Recurse into insert_subvector base/sub vector to find scalars.
  if (Opcode == ISD::INSERT_SUBVECTOR &&
      isa<ConstantSDNode>(N->getOperand(2))) {
    SDValue Vec = N->getOperand(0);
    SDValue Sub = N->getOperand(1);
    EVT SubVT = Sub.getValueType();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    uint64_t SubIdx = N->getConstantOperandVal(2);

    if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
      return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
    return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
  }

  // Recurse into extract_subvector src vector to find scalars.
  if (Opcode == ISD::EXTRACT_SUBVECTOR &&
      isa<ConstantSDNode>(N->getOperand(1))) {
    SDValue Src = N->getOperand(0);
    uint64_t SrcIdx = N->getConstantOperandVal(1);
    return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
  }

  // Actual nodes that may contain scalar elements.
  if (Opcode == ISD::BITCAST) {
    V = V.getOperand(0);
    EVT SrcVT = V.getValueType();
    unsigned NumElems = VT.getVectorNumElements();

    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
      return SDValue();
  }

  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return (Index == 0) ? V.getOperand(0)
                        : DAG.getUNDEF(VT.getVectorElementType());

  if (V.getOpcode() == ISD::BUILD_VECTOR)
    return V.getOperand(Index);

  return SDValue();
}
// Use PINSRB/PINSRW/PINSRD to create a build vector.
static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
                                        unsigned NumNonZero, unsigned NumZero,
                                        SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
          ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
         "Illegal vector insertion");

  SDLoc dl(Op);
  SDValue V;
  bool First = true;

  for (unsigned i = 0; i < NumElts; ++i) {
    bool IsNonZero = (NonZeros & (1 << i)) != 0;
    if (!IsNonZero)
      continue;

    // If the build vector contains zeros or our first insertion is not the
    // first index then insert into zero vector to break any register
    // dependency else use SCALAR_TO_VECTOR.
    if (First) {
      First = false;
      if (NumZero || 0 != i)
        V = getZeroVector(VT, Subtarget, DAG, dl);
      else {
        assert(0 == i && "Expected insertion into zero-index");
        V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
        V = DAG.getBitcast(VT, V);
        continue;
      }
    }
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
                    DAG.getIntPtrConstant(i, dl));
  }

  return V;
}
/// Custom lower build_vector of v16i8.
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (NumNonZero > 8 && !Subtarget.hasSSE41())
    return SDValue();

  // SSE4.1 - use PINSRB to insert each byte directly.
  if (Subtarget.hasSSE41())
    return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
                                    Subtarget);

  SDLoc dl(Op);
  SDValue V;

  // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
  for (unsigned i = 0; i < 16; i += 2) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
    if (!ThisIsNonZero && !NextIsNonZero)
      continue;

    // FIXME: Investigate combining the first 4 bytes as an i32 instead.
    SDValue Elt;
    if (ThisIsNonZero) {
      if (NumZero || NextIsNonZero)
        Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
      else
        Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
    }

    if (NextIsNonZero) {
      SDValue NextElt = Op.getOperand(i + 1);
      if (i == 0 && NumZero)
        NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
      else
        NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
      NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
                            DAG.getConstant(8, dl, MVT::i8));
      if (ThisIsNonZero)
        Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
      else
        Elt = NextElt;
    }

    // If our first insertion is not the first index then insert into zero
    // vector to break any register dependency else use SCALAR_TO_VECTOR.
    if (!V) {
      if (NumZero || 0 != i)
        V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
      else {
        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
        V = DAG.getBitcast(MVT::v8i16, V);
        continue;
      }
    }
    Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
                    DAG.getIntPtrConstant(i / 2, dl));
  }

  return DAG.getBitcast(MVT::v16i8, V);
}
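// Pre-SSE4.1 sketch of the byte-pair trick (illustrative): two adjacent bytes
// b0 and b1 are combined as (b1 << 8) | b0 in a GPR and inserted into the
// v8i16 result with a single PINSRW at word index i / 2, halving the number
// of insertions compared to per-byte handling.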
/// Custom lower build_vector of v8i16.
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (NumNonZero > 4 && !Subtarget.hasSSE41())
    return SDValue();

  // Use PINSRW to insert each word directly.
  return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
                                  Subtarget);
}
/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  // If this is a splat of a pair of elements, use MOVDDUP (unless the target
  // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
  // Because we're creating a less complicated build vector here, we may enable
  // further folding of the MOVDDUP via shuffle transforms.
  if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
      Op.getOperand(0) == Op.getOperand(2) &&
      Op.getOperand(1) == Op.getOperand(3) &&
      Op.getOperand(0) != Op.getOperand(1)) {
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    MVT EltVT = VT.getVectorElementType();
    // Create a new build vector with the first 2 elements followed by undef
    // padding, bitcast to v2f64, duplicate, and bitcast back.
    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
    SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
    SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
    return DAG.getBitcast(VT, Dup);
  }
  // Find all zeroable elements.
  std::bitset<4> Zeroable, Undefs;
  for (int i = 0; i < 4; ++i) {
    SDValue Elt = Op.getOperand(i);
    Undefs[i] = Elt.isUndef();
    Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
  }
  assert(Zeroable.size() - Zeroable.count() > 1 &&
         "We expect at least two non-zero elements!");

  // We only know how to deal with build_vector nodes where elements are either
  // zeroable or extract_vector_elt with constant index.
  SDValue FirstNonZero;
  unsigned FirstNonZeroIdx;
  for (unsigned i = 0; i < 4; ++i) {
    if (Zeroable[i])
      continue;
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Elt.getOperand(1)))
      return SDValue();
    // Make sure that this node is extracting from a 128-bit vector.
    MVT VT = Elt.getOperand(0).getSimpleValueType();
    if (!VT.is128BitVector())
      return SDValue();
    if (!FirstNonZero.getNode()) {
      FirstNonZero = Elt;
      FirstNonZeroIdx = i;
    }
  }

  assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
  SDValue V1 = FirstNonZero.getOperand(0);
  MVT VT = V1.getSimpleValueType();

  // See if this build_vector can be lowered as a blend with zero.
  SDValue Elt;
  unsigned EltMaskIdx, EltIdx;
  int Mask[4];
  for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
    if (Zeroable[EltIdx]) {
      // The zero vector will be on the right hand side.
      Mask[EltIdx] = EltIdx + 4;
      continue;
    }

    Elt = Op->getOperand(EltIdx);
    // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
    EltMaskIdx = Elt.getConstantOperandVal(1);
    if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
      break;
    Mask[EltIdx] = EltIdx;
  }

  if (EltIdx == 4) {
    // Let the shuffle legalizer deal with blend operations.
    SDValue VZeroOrUndef = (Zeroable == Undefs)
                               ? DAG.getUNDEF(VT)
                               : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
    if (V1.getSimpleValueType() != VT)
      V1 = DAG.getBitcast(VT, V1);
    return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
  }
7616 // See if we can lower this build_vector to a INSERTPS.
7617 if (!Subtarget.hasSSE41())
7620 SDValue V2 = Elt.getOperand(0);
7621 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7624 bool CanFold = true;
7625 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7629 SDValue Current = Op->getOperand(i);
7630 SDValue SrcVector = Current->getOperand(0);
7633 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7639 assert(V1.getNode() && "Expected at least two non-zero elements!");
7640 if (V1.getSimpleValueType() != MVT::v4f32)
7641 V1 = DAG.getBitcast(MVT::v4f32, V1);
7642 if (V2.getSimpleValueType() != MVT::v4f32)
7643 V2 = DAG.getBitcast(MVT::v4f32, V2);
7645 // Ok, we can emit an INSERTPS instruction.
7646 unsigned ZMask = Zeroable.to_ulong();
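// A note on the immediate built below: INSERTPS packs, from high to low,
// bits [7:6] = source element taken from V2, bits [5:4] = destination lane
// written in V1, and bits [3:0] = result lanes forced to zero.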
7648 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7649 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7650 SDLoc DL(Op);
7651 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7652 DAG.getIntPtrConstant(InsertPSMask, DL, true));
7653 return DAG.getBitcast(VT, Result);
7654 }
7656 /// Return a vector logical shift node.
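/// For example (illustrative), getVShift(true, MVT::v2i64, X, 64, ...) emits
/// X86ISD::VSHLDQ (a whole-register byte shift) of the v16i8 bitcast of X by
/// 64 / 8 == 8 bytes.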
7657 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7658 SelectionDAG &DAG, const TargetLowering &TLI,
7659 const SDLoc &dl) {
7660 assert(VT.is128BitVector() && "Unknown type for VShift");
7661 MVT ShVT = MVT::v16i8;
7662 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7663 SrcOp = DAG.getBitcast(ShVT, SrcOp);
7664 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7665 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
7666 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7667 }
7669 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7670 SelectionDAG &DAG) {
7672 // Check if the scalar load can be widened into a vector load, and if
7673 // the address is "base + cst", see if the cst can be "absorbed" into
7674 // the shuffle mask.
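// Illustrative case: a 4-byte load from "stack slot + 12" feeding a v4f32
// splat can become an aligned 16-byte load of the whole slot, with the +12
// absorbed into the splat shuffle mask as element index 3, i.e. <3,3,3,3>.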
7675 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7676 SDValue Ptr = LD->getBasePtr();
7677 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
7678 return SDValue();
7679 EVT PVT = LD->getValueType(0);
7680 if (PVT != MVT::i32 && PVT != MVT::f32)
7681 return SDValue();
7683 int FI = -1;
7684 int64_t Offset = 0;
7685 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7686 FI = FINode->getIndex();
7687 Offset = 0;
7688 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7689 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7690 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7691 Offset = Ptr.getConstantOperandVal(1);
7692 Ptr = Ptr.getOperand(0);
7693 } else {
7694 return SDValue();
7695 }
7697 // FIXME: 256-bit vector instructions don't require a strict alignment,
7698 // improve this code to support it better.
7699 unsigned RequiredAlign = VT.getSizeInBits()/8;
7700 SDValue Chain = LD->getChain();
7701 // Make sure the stack object alignment is at least 16 or 32.
7702 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7703 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7704 if (MFI.isFixedObjectIndex(FI)) {
7705 // Can't change the alignment. FIXME: It's possible to compute
7706 // the exact stack offset and reference FI + adjust offset instead.
7707 // If someone *really* cares about this, that's the way to implement it.
7708 return SDValue();
7709 }
7710 MFI.setObjectAlignment(FI, RequiredAlign);
7711 }
7714 // (Offset % 16 or 32) must be a multiple of 4. The address is then
7715 // Ptr + (Offset & ~15).
7718 if ((Offset % RequiredAlign) & 3)
7719 return SDValue();
7720 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7721 if (StartOffset) {
7722 SDLoc DL(Ptr);
7723 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7724 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7725 }
7727 int EltNo = (Offset - StartOffset) >> 2;
7728 unsigned NumElems = VT.getVectorNumElements();
7730 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7731 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
7732 LD->getPointerInfo().getWithOffset(StartOffset));
7734 SmallVector<int, 8> Mask(NumElems, EltNo);
7736 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
7737 }
7739 return SDValue();
7740 }
7742 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
7743 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
7744 if (ISD::isNON_EXTLoad(Elt.getNode())) {
7745 auto *BaseLd = cast<LoadSDNode>(Elt);
7746 if (!BaseLd->isSimple())
7747 return false;
7748 Ld = BaseLd;
7749 ByteOffset = 0;
7750 return true;
7751 }
7753 switch (Elt.getOpcode()) {
7754 case ISD::BITCAST:
7755 case ISD::TRUNCATE:
7756 case ISD::SCALAR_TO_VECTOR:
7757 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
7758 case ISD::SRL:
7759 if (isa<ConstantSDNode>(Elt.getOperand(1))) {
7760 uint64_t Idx = Elt.getConstantOperandVal(1);
7761 if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
7762 ByteOffset += Idx / 8;
7763 return true;
7764 }
7765 }
7766 break;
7767 case ISD::EXTRACT_VECTOR_ELT:
7768 if (isa<ConstantSDNode>(Elt.getOperand(1))) {
7769 SDValue Src = Elt.getOperand(0);
7770 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
7771 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
7772 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
7773 findEltLoadSrc(Src, Ld, ByteOffset)) {
7774 uint64_t Idx = Elt.getConstantOperandVal(1);
7775 ByteOffset += Idx * (SrcSizeInBits / 8);
7776 return true;
7777 }
7778 }
7779 break;
7780 }
7782 return false;
7783 }
7785 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
7786 /// elements can be replaced by a single large load which has the same value as
7787 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
7789 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
7790 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
7791 const SDLoc &DL, SelectionDAG &DAG,
7792 const X86Subtarget &Subtarget,
7793 bool isAfterLegalize) {
7794 if ((VT.getScalarSizeInBits() % 8) != 0)
7795 return SDValue();
7797 unsigned NumElems = Elts.size();
7799 int LastLoadedElt = -1;
7800 APInt LoadMask = APInt::getNullValue(NumElems);
7801 APInt ZeroMask = APInt::getNullValue(NumElems);
7802 APInt UndefMask = APInt::getNullValue(NumElems);
7804 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
7805 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
7807 // For each element in the initializer, see if we've found a load, zero or an
7808 // undef.
7809 for (unsigned i = 0; i < NumElems; ++i) {
7810 SDValue Elt = peekThroughBitcasts(Elts[i]);
7811 if (!Elt.getNode())
7812 return SDValue();
7813 if (Elt.isUndef()) {
7814 UndefMask.setBit(i);
7815 continue;
7816 }
7817 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
7818 ZeroMask.setBit(i);
7819 continue;
7820 }
7822 // Each loaded element must be the correct fractional portion of the
7823 // requested vector load.
7824 unsigned EltSizeInBits = Elt.getValueSizeInBits();
7825 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
7826 return SDValue();
7828 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
7829 return SDValue();
7830 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
7831 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
7832 return SDValue();
7834 LoadMask.setBit(i);
7835 LastLoadedElt = i;
7836 }
7837 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
7838 LoadMask.countPopulation()) == NumElems &&
7839 "Incomplete element masks");
7841 // Handle Special Cases - all undef or undef/zero.
7842 if (UndefMask.countPopulation() == NumElems)
7843 return DAG.getUNDEF(VT);
7845 // FIXME: Should we return this as a BUILD_VECTOR instead?
7846 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
7847 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
7848 : DAG.getConstantFP(0.0, DL, VT);
7850 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7851 int FirstLoadedElt = LoadMask.countTrailingZeros();
7852 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
7853 EVT EltBaseVT = EltBase.getValueType();
7854 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
7855 "Register/Memory size mismatch");
7856 LoadSDNode *LDBase = Loads[FirstLoadedElt];
7857 assert(LDBase && "Did not find base load for merging consecutive loads");
7858 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
7859 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
7860 int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
7861 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
7863 // TODO: Support offsetting the base load.
7864 if (ByteOffsets[FirstLoadedElt] != 0)
7865 return SDValue();
7867 // Check to see if the element's load is consecutive to the base load
7868 // or offset from a previous (already checked) load.
7869 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
7870 LoadSDNode *Ld = Loads[EltIdx];
7871 int64_t ByteOffset = ByteOffsets[EltIdx];
7872 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
7873 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
7874 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
7875 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
7876 }
7877 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
7878 EltIdx - FirstLoadedElt);
7879 };
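// For example (4-byte elements): element 3 with ByteOffset 8 is accepted if
// element 1 (== 3 - 8/4) was already matched as the same load at offset 0.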
7881 // Consecutive loads can contain UNDEF but not ZERO elements.
7882 // Consecutive loads with UNDEF and ZERO elements require an
7883 // additional shuffle stage to clear the ZERO elements.
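// e.g. <load a+0, zero, load a+8, load a+12> can still use a single v4i32
// load, followed by a shuffle against a zero vector to clear lane 1.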
7884 bool IsConsecutiveLoad = true;
7885 bool IsConsecutiveLoadWithZeros = true;
7886 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
7887 if (LoadMask[i]) {
7888 if (!CheckConsecutiveLoad(LDBase, i)) {
7889 IsConsecutiveLoad = false;
7890 IsConsecutiveLoadWithZeros = false;
7891 break;
7892 }
7893 } else if (ZeroMask[i]) {
7894 IsConsecutiveLoad = false;
7895 }
7896 }
7898 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
7899 auto MMOFlags = LDBase->getMemOperand()->getFlags();
7900 assert(LDBase->isSimple() &&
7901 "Cannot merge volatile or atomic loads.");
7903 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
7904 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
7905 for (auto *LD : Loads)
7907 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
7911 // Check if the base load is entirely dereferenceable.
7912 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
7913 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
7915 // LOAD - all consecutive load/undefs (must start/end with a load or be
7916 // entirely dereferenceable). If we have found an entire vector of loads and
7917 // undefs, then return a large load of the entire vector width starting at the
7918 // base pointer. If the vector contains zeros, then attempt to shuffle those
7919 // elements.
7920 if (FirstLoadedElt == 0 &&
7921 (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
7922 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
7923 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
7924 return SDValue();
7926 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
7927 // will lower to regular temporal loads and use the cache.
7928 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
7929 VT.is256BitVector() && !Subtarget.hasInt256())
7930 return SDValue();
7932 if (NumElems == 1)
7933 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
7935 if (IsConsecutiveLoad)
7936 return CreateLoad(VT, LDBase);
7938 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
7939 // vector and a zero vector to clear out the zero elements.
7940 if (!isAfterLegalize && VT.isVector()) {
7941 unsigned NumMaskElts = VT.getVectorNumElements();
7942 if ((NumMaskElts % NumElems) == 0) {
7943 unsigned Scale = NumMaskElts / NumElems;
7944 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
7945 for (unsigned i = 0; i < NumElems; ++i) {
7946 if (UndefMask[i])
7947 continue;
7948 int Offset = ZeroMask[i] ? NumMaskElts : 0;
7949 for (unsigned j = 0; j != Scale; ++j)
7950 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
7951 }
7952 SDValue V = CreateLoad(VT, LDBase);
7953 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
7954 : DAG.getConstantFP(0.0, DL, VT);
7955 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
7956 }
7957 }
7958 }
7960 // If the upper half of a ymm/zmm load is undef then just load the lower half.
7961 if (VT.is256BitVector() || VT.is512BitVector()) {
7962 unsigned HalfNumElems = NumElems / 2;
7963 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
7964 EVT HalfVT =
7965 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
7966 SDValue HalfLD =
7967 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
7968 DAG, Subtarget, isAfterLegalize);
7969 if (HalfLD)
7970 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
7971 HalfLD, DAG.getIntPtrConstant(0, DL));
7972 }
7973 }
7975 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
7976 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
7977 (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
7978 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
7979 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
7980 : MVT::getIntegerVT(LoadSizeInBits);
7981 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
7982 if (TLI.isTypeLegal(VecVT)) {
7983 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
7984 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
7985 SDValue ResNode =
7986 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
7987 LDBase->getPointerInfo(),
7988 LDBase->getAlignment(),
7989 MachineMemOperand::MOLoad);
7990 for (auto *LD : Loads)
7991 if (LD)
7992 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
7993 return DAG.getBitcast(VT, ResNode);
7994 }
7995 }
7997 // BROADCAST - match the smallest possible repetition pattern, load that
7998 // scalar/subvector element and then broadcast to the entire vector.
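// For example, a v4i32 build_vector <load a, load a+4, load a, load a+4>
// repeats every 64 bits, so it can become one i64 load of "a" that is then
// broadcast to all lanes.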
7999 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
8000 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
8001 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
8002 unsigned RepeatSize = SubElems * BaseSizeInBits;
8003 unsigned ScalarSize = std::min(RepeatSize, 64u);
8004 if (!Subtarget.hasAVX2() && ScalarSize < 32)
8005 continue;
8007 bool Match = true;
8008 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
8009 for (unsigned i = 0; i != NumElems && Match; ++i) {
8010 if (!LoadMask[i])
8011 continue;
8012 SDValue Elt = peekThroughBitcasts(Elts[i]);
8013 if (RepeatedLoads[i % SubElems].isUndef())
8014 RepeatedLoads[i % SubElems] = Elt;
8015 else
8016 Match &= (RepeatedLoads[i % SubElems] == Elt);
8017 }
8019 // We must have loads at both ends of the repetition.
8020 Match &= !RepeatedLoads.front().isUndef();
8021 Match &= !RepeatedLoads.back().isUndef();
8022 if (!Match)
8023 continue;
8025 EVT RepeatVT =
8026 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8027 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8028 : EVT::getFloatingPointVT(ScalarSize);
8029 if (RepeatSize > ScalarSize)
8030 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8031 RepeatSize / ScalarSize);
8032 EVT BroadcastVT =
8033 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8034 VT.getSizeInBits() / ScalarSize);
8035 if (TLI.isTypeLegal(BroadcastVT)) {
8036 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8037 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8038 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8039 : X86ISD::VBROADCAST;
8040 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8041 return DAG.getBitcast(VT, Broadcast);
8042 }
8043 }
8044 }
8045 }
8047 return SDValue();
8048 }
8050 // Combine vector ops (shuffles etc.) that are equal to build_vector load1,
8051 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8052 // are consecutive, non-overlapping, and in the right order.
8053 static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
8054 SelectionDAG &DAG,
8055 const X86Subtarget &Subtarget,
8056 bool isAfterLegalize) {
8057 SmallVector<SDValue, 64> Elts;
8058 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8059 if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
8060 Elts.push_back(Elt);
8061 continue;
8062 }
8063 return SDValue();
8064 }
8065 assert(Elts.size() == VT.getVectorNumElements());
8066 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8067 isAfterLegalize);
8068 }
8070 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8071 unsigned SplatBitSize, LLVMContext &C) {
8072 unsigned ScalarSize = VT.getScalarSizeInBits();
8073 unsigned NumElm = SplatBitSize / ScalarSize;
8075 SmallVector<Constant *, 32> ConstantVec;
8076 for (unsigned i = 0; i < NumElm; i++) {
8077 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
8078 Constant *Const;
8079 if (VT.isFloatingPoint()) {
8080 if (ScalarSize == 32) {
8081 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8082 } else {
8083 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8084 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8085 }
8086 } else
8087 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8088 ConstantVec.push_back(Const);
8089 }
8090 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8091 }
8093 static bool isFoldableUseOfShuffle(SDNode *N) {
8094 for (auto *U : N->uses()) {
8095 unsigned Opc = U->getOpcode();
8096 // VPERMV/VPERMV3 shuffles can never fold their index operands.
8097 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8098 return false;
8099 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8100 return false;
8101 if (isTargetShuffle(Opc))
8102 return true;
8103 if (Opc == ISD::BITCAST) // Ignore bitcasts
8104 return isFoldableUseOfShuffle(U);
8105 if (Opc == X86ISD::VZEXT_MOVL) // Ignore vzmovl
8106 return true;
8107 }
8108 return false;
8109 }
8111 // Check if the current node of the build vector is a zero extended vector.
8112 // If so, return the value extended.
8113 // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8114 // NumElt - return the number of zero extended identical values.
8115 // EltType - return the type of the value including the zero extend.
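// A worked illustration: for v16i8 (a,0,0,0,a,0,0,0,a,0,0,0,a,0,0,0), Delta
// is 4, so EltType becomes i32 and NumElt becomes 4.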
8116 static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8117 unsigned &NumElt, MVT &EltType) {
8118 SDValue ExtValue = Op->getOperand(0);
8119 unsigned NumElts = Op->getNumOperands();
8120 unsigned Delta = NumElts;
8122 for (unsigned i = 1; i < NumElts; i++) {
8123 if (Op->getOperand(i) == ExtValue) {
8124 Delta = i;
8125 break;
8126 }
8127 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8128 return SDValue();
8129 }
8130 if (!isPowerOf2_32(Delta) || Delta == 1)
8131 return SDValue();
8133 for (unsigned i = Delta; i < NumElts; i++) {
8134 if (i % Delta == 0) {
8135 if (Op->getOperand(i) != ExtValue)
8136 return SDValue();
8137 } else if (!(isNullConstant(Op->getOperand(i)) ||
8138 Op->getOperand(i).isUndef()))
8139 return SDValue();
8140 }
8141 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8142 unsigned ExtVTSize = EltSize * Delta;
8143 EltType = MVT::getIntegerVT(ExtVTSize);
8144 NumElt = NumElts / Delta;
8145 return ExtValue;
8146 }
8148 /// Attempt to use the vbroadcast instruction to generate a splat value
8149 /// from a splat BUILD_VECTOR which uses:
8150 /// a. A single scalar load, or a constant.
8151 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8153 /// The VBROADCAST node is returned when a pattern is found,
8154 /// or SDValue() otherwise.
8155 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8156 const X86Subtarget &Subtarget,
8157 SelectionDAG &DAG) {
8158 // VBROADCAST requires AVX.
8159 // TODO: Splats could be generated for non-AVX CPUs using SSE
8160 // instructions, but there's less potential gain for only 128-bit vectors.
8161 if (!Subtarget.hasAVX())
8162 return SDValue();
8164 MVT VT = BVOp->getSimpleValueType(0);
8165 SDLoc dl(BVOp);
8167 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8168 "Unsupported vector type for broadcast.");
8170 BitVector UndefElements;
8171 SDValue Ld = BVOp->getSplatValue(&UndefElements);
8173 // Attempt to use VBROADCASTM
8174 // From this pattern:
8175 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8176 // b. t1 = (build_vector t0 t0)
8178 // Create (VBROADCASTM v2i1 X)
8179 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8180 MVT EltType = VT.getScalarType();
8181 unsigned NumElts = VT.getVectorNumElements();
8182 SDValue BOperand;
8183 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8184 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8185 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8186 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8187 if (ZeroExtended)
8188 BOperand = ZeroExtended.getOperand(0);
8189 else
8190 BOperand = Ld.getOperand(0).getOperand(0);
8191 MVT MaskVT = BOperand.getSimpleValueType();
8192 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8193 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8194 SDValue Brdcst =
8195 DAG.getNode(X86ISD::VBROADCASTM, dl,
8196 MVT::getVectorVT(EltType, NumElts), BOperand);
8197 return DAG.getBitcast(VT, Brdcst);
8198 }
8199 }
8200 }
8202 unsigned NumElts = VT.getVectorNumElements();
8203 unsigned NumUndefElts = UndefElements.count();
8204 if (!Ld || (NumElts - NumUndefElts) <= 1) {
8205 APInt SplatValue, Undef;
8206 unsigned SplatBitSize;
8207 bool HasUndef;
8208 // Check if this is a repeated constant pattern suitable for broadcasting.
8209 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8210 SplatBitSize > VT.getScalarSizeInBits() &&
8211 SplatBitSize < VT.getSizeInBits()) {
8212 // Avoid replacing with broadcast when it's a use of a shuffle
8213 // instruction to preserve the present custom lowering of shuffles.
8214 if (isFoldableUseOfShuffle(BVOp))
8215 return SDValue();
8216 // Replace BUILD_VECTOR with a broadcast of the repeated constants.
8217 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8218 LLVMContext *Ctx = DAG.getContext();
8219 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8220 if (Subtarget.hasAVX()) {
8221 if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
8222 !(SplatBitSize == 64 && Subtarget.is32Bit())) {
8223 // Splatted value can fit in one INTEGER constant in constant pool.
8224 // Load the constant and broadcast it.
8225 MVT CVT = MVT::getIntegerVT(SplatBitSize);
8226 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8227 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8228 SDValue CP = DAG.getConstantPool(C, PVT);
8229 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8231 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8232 SDValue Ld = DAG.getLoad(
8233 CVT, dl, DAG.getEntryNode(), CP,
8234 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8235 Alignment);
8236 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8237 MVT::getVectorVT(CVT, Repeat), Ld);
8238 return DAG.getBitcast(VT, Brdcst);
8239 } else if (SplatBitSize == 32 || SplatBitSize == 64) {
8240 // Splatted value can fit in one FLOAT constant in constant pool.
8241 // Load the constant and broadcast it.
8242 // AVX has support for 32- and 64-bit broadcasts for floats only.
8243 // There is no 64-bit integer broadcast on a 32-bit subtarget.
8244 MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
8245 // Lower the splat via APFloat directly, to avoid any conversion.
8246 Constant *C =
8247 SplatBitSize == 32
8248 ? ConstantFP::get(*Ctx,
8249 APFloat(APFloat::IEEEsingle(), SplatValue))
8250 : ConstantFP::get(*Ctx,
8251 APFloat(APFloat::IEEEdouble(), SplatValue));
8252 SDValue CP = DAG.getConstantPool(C, PVT);
8253 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8255 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8256 SDValue Ld = DAG.getLoad(
8257 CVT, dl, DAG.getEntryNode(), CP,
8258 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8259 Alignment);
8260 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8261 MVT::getVectorVT(CVT, Repeat), Ld);
8262 return DAG.getBitcast(VT, Brdcst);
8263 } else if (SplatBitSize > 64) {
8264 // Load the vector of constants and broadcast it.
8265 MVT CVT = VT.getScalarType();
8266 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8267 *Ctx);
8268 SDValue VCP = DAG.getConstantPool(VecC, PVT);
8269 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8270 unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
8271 SDValue Ld = DAG.getLoad(
8272 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8273 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8274 Alignment);
8275 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8276 return DAG.getBitcast(VT, Brdcst);
8277 }
8278 }
8279 }
8281 // If we are moving a scalar into a vector (Ld must be set and all elements
8282 // but 1 are undef) and that operation is not obviously supported by
8283 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8284 // That's better than general shuffling and may eliminate a load to GPR and
8285 // move from scalar to vector register.
8286 if (!Ld || NumElts - NumUndefElts != 1)
8287 return SDValue();
8288 unsigned ScalarSize = Ld.getValueSizeInBits();
8289 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8290 return SDValue();
8291 }
8293 bool ConstSplatVal =
8294 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8296 // Make sure that all of the users of a non-constant load are from the
8297 // BUILD_VECTOR node.
8298 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
8299 return SDValue();
8301 unsigned ScalarSize = Ld.getValueSizeInBits();
8302 bool IsGE256 = (VT.getSizeInBits() >= 256);
8304 // When optimizing for size, generate up to 5 extra bytes for a broadcast
8305 // instruction to save 8 or more bytes of constant pool data.
8306 // TODO: If multiple splats are generated to load the same constant,
8307 // it may be detrimental to overall size. There needs to be a way to detect
8308 // that condition to know if this is truly a size win.
8309 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
8311 // Handle broadcasting a single constant scalar from the constant pool
8312 // into a vector.
8313 // On Sandybridge (no AVX2), it is still better to load a constant vector
8314 // from the constant pool and not to broadcast it from a scalar.
8315 // But override that restriction when optimizing for size.
8316 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8317 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8318 EVT CVT = Ld.getValueType();
8319 assert(!CVT.isVector() && "Must not broadcast a vector type");
8321 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8322 // For size optimization, also splat v2f64 and v2i64, and for size opt
8323 // with AVX2, also splat i8 and i16.
8324 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8325 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8326 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8327 const Constant *C = nullptr;
8328 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8329 C = CI->getConstantIntValue();
8330 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8331 C = CF->getConstantFPValue();
8333 assert(C && "Invalid constant type");
8335 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8336 SDValue CP =
8337 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8338 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8339 Ld = DAG.getLoad(
8340 CVT, dl, DAG.getEntryNode(), CP,
8341 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8342 Alignment);
8344 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8345 }
8346 }
8348 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8350 // Handle AVX2 in-register broadcasts.
8351 if (!IsLoad && Subtarget.hasInt256() &&
8352 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8353 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8355 // The scalar source must be a normal load.
8356 if (!IsLoad)
8357 return SDValue();
8359 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8360 (Subtarget.hasVLX() && ScalarSize == 64))
8361 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8363 // The integer check is needed for the 64-bit into 128-bit case, since
8364 // there is no vbroadcastsd for xmm (f64 must not match here).
8365 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8366 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8367 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8368 }
8370 // Unsupported broadcast.
8371 return SDValue();
8372 }
8374 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
8375 /// underlying vector and index.
8377 /// Modifies \p ExtractedFromVec to the real vector and returns the real
8378 /// index.
8379 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8380 SDValue ExtIdx) {
8381 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8382 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8383 return Idx;
8385 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8386 // lowered this:
8387 // (extract_vector_elt (v8f32 %1), Constant<6>)
8388 // to:
8389 // (extract_vector_elt (vector_shuffle<2,u,u,u>
8390 // (extract_subvector (v8f32 %0), Constant<4>),
8391 // undef)
8392 // Constant<2>)
8393 // In this case the vector is the extract_subvector expression and the index
8394 // is 2, as specified by the shuffle.
8395 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8396 SDValue ShuffleVec = SVOp->getOperand(0);
8397 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8398 assert(ShuffleVecVT.getVectorElementType() ==
8399 ExtractedFromVec.getSimpleValueType().getVectorElementType());
8401 int ShuffleIdx = SVOp->getMaskElt(Idx);
8402 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8403 ExtractedFromVec = ShuffleVec;
8404 return ShuffleIdx;
8405 }
8406 return Idx;
8407 }
8409 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8410 MVT VT = Op.getSimpleValueType();
8412 // Skip if insert_vec_elt is not supported.
8413 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8414 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8415 return SDValue();
8417 SDLoc DL(Op);
8418 unsigned NumElems = Op.getNumOperands();
8420 SDValue VecIn1;
8421 SDValue VecIn2;
8422 SmallVector<unsigned, 4> InsertIndices;
8423 SmallVector<int, 8> Mask(NumElems, -1);
8425 for (unsigned i = 0; i != NumElems; ++i) {
8426 unsigned Opc = Op.getOperand(i).getOpcode();
8428 if (Opc == ISD::UNDEF)
8429 continue;
8431 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8432 // Quit if more than one element needs inserting.
8433 if (InsertIndices.size() > 1)
8434 return SDValue();
8436 InsertIndices.push_back(i);
8437 continue;
8438 }
8440 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8441 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8443 // Quit if non-constant index.
8444 if (!isa<ConstantSDNode>(ExtIdx))
8445 return SDValue();
8446 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8448 // Quit if extracted from vector of different type.
8449 if (ExtractedFromVec.getValueType() != VT)
8450 return SDValue();
8452 if (!VecIn1.getNode())
8453 VecIn1 = ExtractedFromVec;
8454 else if (VecIn1 != ExtractedFromVec) {
8455 if (!VecIn2.getNode())
8456 VecIn2 = ExtractedFromVec;
8457 else if (VecIn2 != ExtractedFromVec)
8458 // Quit if more than 2 vectors to shuffle.
8459 return SDValue();
8460 }
8462 if (ExtractedFromVec == VecIn1)
8463 Mask[i] = Idx;
8464 else if (ExtractedFromVec == VecIn2)
8465 Mask[i] = Idx + NumElems;
8466 }
8468 if (!VecIn1.getNode())
8469 return SDValue();
8471 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8472 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8474 for (unsigned Idx : InsertIndices)
8475 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8476 DAG.getIntPtrConstant(Idx, DL));
8478 return NV;
8479 }
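// A small example of the packing performed below: a v4i1 constant <1,0,1,1>
// yields Immediate == 0b1101, returned as an i8 constant (minimum width 8).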
8481 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8482 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8483 Op.getScalarValueSizeInBits() == 1 &&
8484 "Can not convert non-constant vector");
8485 uint64_t Immediate = 0;
8486 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8487 SDValue In = Op.getOperand(idx);
8488 if (!In.isUndef())
8489 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8490 }
8491 SDLoc dl(Op);
8492 MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8493 return DAG.getConstant(Immediate, dl, VT);
8494 }
8495 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8496 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8497 const X86Subtarget &Subtarget) {
8499 MVT VT = Op.getSimpleValueType();
8500 assert((VT.getVectorElementType() == MVT::i1) &&
8501 "Unexpected type in LowerBUILD_VECTORvXi1!");
8504 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
8505 ISD::isBuildVectorAllOnes(Op.getNode()))
8508 uint64_t Immediate = 0;
8509 SmallVector<unsigned, 16> NonConstIdx;
8510 bool IsSplat = true;
8511 bool HasConstElts = false;
8513 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8514 SDValue In = Op.getOperand(idx);
8517 if (!isa<ConstantSDNode>(In))
8518 NonConstIdx.push_back(idx);
8520 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8521 HasConstElts = true;
8525 else if (In != Op.getOperand(SplatIdx))
8529 // for splat use " (select i1 splat_elt, all-ones, all-zeroes)"
8531 // The build_vector allows the scalar element to be larger than the vector
8532 // element type. We need to mask it to use as a condition unless we know
8533 // the upper bits are zero.
8534 // FIXME: Use computeKnownBits instead of checking specific opcode?
8535 SDValue Cond = Op.getOperand(SplatIdx);
8536 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
8537 if (Cond.getOpcode() != ISD::SETCC)
8538 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
8539 DAG.getConstant(1, dl, MVT::i8));
8540 return DAG.getSelect(dl, VT, Cond,
8541 DAG.getConstant(1, dl, VT),
8542 DAG.getConstant(0, dl, VT));
8543 }
8545 // Insert elements one by one.
8546 SDValue DstVec;
8547 if (HasConstElts) {
8548 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
8549 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
8550 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
8551 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
8552 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
8553 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
8554 } else {
8555 MVT ImmVT = MVT::getIntegerVT(std::max(VT.getSizeInBits(), 8U));
8556 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
8557 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
8558 DstVec = DAG.getBitcast(VecVT, Imm);
8559 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
8560 DAG.getIntPtrConstant(0, dl));
8561 }
8562 } else
8563 DstVec = DAG.getUNDEF(VT);
8565 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8566 unsigned InsertIdx = NonConstIdx[i];
8567 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8568 Op.getOperand(InsertIdx),
8569 DAG.getIntPtrConstant(InsertIdx, dl));
8570 }
8571 return DstVec;
8572 }
8574 /// This is a helper function of LowerToHorizontalOp().
8575 /// This function checks that the build_vector \p N in input implements a
8576 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8577 /// may not match the layout of an x86 256-bit horizontal instruction.
8578 /// In other words, if this returns true, then some extraction/insertion will
8579 /// be required to produce a valid horizontal instruction.
8581 /// Parameter \p Opcode defines the kind of horizontal operation to match.
8582 /// For example, if \p Opcode is equal to ISD::ADD, then this function
8583 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8584 /// is equal to ISD::SUB, then this function checks if this is a horizontal
8585 /// arithmetic sub.
8587 /// This function only analyzes elements of \p N whose indices are
8588 /// in range [BaseIdx, LastIdx).
8590 /// TODO: This function was originally used to match both real and fake partial
8591 /// horizontal operations, but the index-matching logic is incorrect for that.
8592 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
8593 /// code because it is only used for partial h-op matching now?
8594 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8595 SelectionDAG &DAG,
8596 unsigned BaseIdx, unsigned LastIdx,
8597 SDValue &V0, SDValue &V1) {
8598 EVT VT = N->getValueType(0);
8599 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8600 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8601 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8602 "Invalid Vector in input!");
8604 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8605 bool CanFold = true;
8606 unsigned ExpectedVExtractIdx = BaseIdx;
8607 unsigned NumElts = LastIdx - BaseIdx;
8608 V0 = DAG.getUNDEF(VT);
8609 V1 = DAG.getUNDEF(VT);
8611 // Check if N implements a horizontal binop.
8612 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8613 SDValue Op = N->getOperand(i + BaseIdx);
8615 // Skip UNDEFs.
8616 if (Op->isUndef()) {
8617 // Update the expected vector extract index.
8618 if (i * 2 == NumElts)
8619 ExpectedVExtractIdx = BaseIdx;
8620 ExpectedVExtractIdx += 2;
8621 continue;
8622 }
8624 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8626 if (!CanFold)
8627 break;
8629 SDValue Op0 = Op.getOperand(0);
8630 SDValue Op1 = Op.getOperand(1);
8632 // Try to match the following pattern:
8633 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8634 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8635 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8636 Op0.getOperand(0) == Op1.getOperand(0) &&
8637 isa<ConstantSDNode>(Op0.getOperand(1)) &&
8638 isa<ConstantSDNode>(Op1.getOperand(1)));
8640 if (!CanFold)
8641 break;
8642 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8643 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8645 if (i * 2 < NumElts) {
8646 if (V0.isUndef()) {
8647 V0 = Op0.getOperand(0);
8648 if (V0.getValueType() != VT)
8649 return false;
8650 }
8651 } else {
8652 if (V1.isUndef()) {
8653 V1 = Op0.getOperand(0);
8654 if (V1.getValueType() != VT)
8655 return false;
8656 }
8657 if (i * 2 == NumElts)
8658 ExpectedVExtractIdx = BaseIdx;
8659 }
8661 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8662 if (I0 == ExpectedVExtractIdx)
8663 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8664 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8665 // Try to match the following dag sequence:
8666 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8667 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8668 } else
8669 CanFold = false;
8671 ExpectedVExtractIdx += 2;
8672 }
8674 return CanFold;
8675 }
8677 /// Emit a sequence of two 128-bit horizontal add/sub followed by
8678 /// a concat_vector.
8680 /// This is a helper function of LowerToHorizontalOp().
8681 /// This function expects two 256-bit vectors called V0 and V1.
8682 /// At first, each vector is split into two separate 128-bit vectors.
8683 /// Then, the resulting 128-bit vectors are used to implement two
8684 /// horizontal binary operations.
8686 /// The kind of horizontal binary operation is defined by \p X86Opcode.
8688 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
8689 /// the two new horizontal binop.
8690 /// When Mode is set, the first horizontal binop dag node takes as input
8691 /// the lower 128 bits of V0 and the upper 128 bits of V0. The second
8692 /// horizontal binop dag node takes as input the lower 128 bits of V1
8693 /// and the upper 128 bits of V1.
8695 /// HADD V0_LO, V0_HI
8696 /// HADD V1_LO, V1_HI
8698 /// Otherwise, the first horizontal binop dag node takes as input the lower
8699 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
8700 /// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
8702 /// HADD V0_LO, V1_LO
8703 /// HADD V0_HI, V1_HI
8705 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8706 /// 128 bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8707 /// the upper 128 bits of the result.
8708 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8709 const SDLoc &DL, SelectionDAG &DAG,
8710 unsigned X86Opcode, bool Mode,
8711 bool isUndefLO, bool isUndefHI) {
8712 MVT VT = V0.getSimpleValueType();
8713 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8714 "Invalid nodes in input!");
8716 unsigned NumElts = VT.getVectorNumElements();
8717 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8718 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8719 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8720 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8721 MVT NewVT = V0_LO.getSimpleValueType();
8723 SDValue LO = DAG.getUNDEF(NewVT);
8724 SDValue HI = DAG.getUNDEF(NewVT);
8726 if (Mode) {
8727 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8728 if (!isUndefLO && !V0->isUndef())
8729 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8730 if (!isUndefHI && !V1->isUndef())
8731 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
8732 } else {
8733 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8734 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
8735 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
8737 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
8738 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
8739 }
8741 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
8742 }
8744 /// Returns true iff \p BV builds a vector with the result equivalent to
8745 /// the result of ADDSUB/SUBADD operation.
8746 /// If true is returned, the operands of the ADDSUB (Opnd0 +- Opnd1) or
8747 /// SUBADD (Opnd0 -+ Opnd1) operation are written to the parameters
8748 /// \p Opnd0 and \p Opnd1.
8749 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
8750 const X86Subtarget &Subtarget, SelectionDAG &DAG,
8751 SDValue &Opnd0, SDValue &Opnd1,
8752 unsigned &NumExtracts,
8753 bool &IsSubAdd) {
8755 MVT VT = BV->getSimpleValueType(0);
8756 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
8757 return false;
8759 unsigned NumElts = VT.getVectorNumElements();
8760 SDValue InVec0 = DAG.getUNDEF(VT);
8761 SDValue InVec1 = DAG.getUNDEF(VT);
8763 NumExtracts = 0;
8765 // Odd-numbered elements in the input build vector are obtained from
8766 // adding/subtracting two integer/float elements.
8767 // Even-numbered elements in the input build vector are obtained from
8768 // subtracting/adding two integer/float elements.
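// For example (v4f32, ADDSUB case): the matched build_vector computes
// <a0-b0, a1+b1, a2-b2, a3+b3>, i.e. even lanes subtract and odd lanes add.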
8769 unsigned Opc[2] = {0, 0};
8770 for (unsigned i = 0, e = NumElts; i != e; ++i) {
8771 SDValue Op = BV->getOperand(i);
8773 // Skip 'undef' values.
8774 unsigned Opcode = Op.getOpcode();
8775 if (Opcode == ISD::UNDEF)
8776 continue;
8778 // Early exit if we found an unexpected opcode.
8779 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
8780 return false;
8782 SDValue Op0 = Op.getOperand(0);
8783 SDValue Op1 = Op.getOperand(1);
8785 // Try to match the following pattern:
8786 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
8787 // Early exit if we cannot match that sequence.
8788 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8789 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8790 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8791 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
8792 Op0.getOperand(1) != Op1.getOperand(1))
8793 return false;
8795 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8796 if (I0 != i)
8797 return false;
8799 // We found a valid add/sub node; make sure it's the same opcode as previous
8800 // elements for this parity.
8801 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
8802 return false;
8803 Opc[i % 2] = Opcode;
8805 // Update InVec0 and InVec1.
8806 if (InVec0.isUndef()) {
8807 InVec0 = Op0.getOperand(0);
8808 if (InVec0.getSimpleValueType() != VT)
8809 return false;
8810 }
8811 if (InVec1.isUndef()) {
8812 InVec1 = Op1.getOperand(0);
8813 if (InVec1.getSimpleValueType() != VT)
8814 return false;
8815 }
8817 // Make sure that operands in input to each add/sub node always
8818 // come from the same pair of vectors.
8819 if (InVec0 != Op0.getOperand(0)) {
8820 if (Opcode == ISD::FSUB)
8821 return false;
8823 // FADD is commutable. Try to commute the operands
8824 // and then test again.
8825 std::swap(Op0, Op1);
8826 if (InVec0 != Op0.getOperand(0))
8827 return false;
8828 }
8830 if (InVec1 != Op1.getOperand(0))
8831 return false;
8833 // Increment the number of extractions done.
8834 ++NumExtracts;
8835 }
8837 // Ensure we have found an opcode for both parities and that they are
8838 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
8839 // inputs are undef.
8840 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
8841 InVec0.isUndef() || InVec1.isUndef())
8842 return false;
8844 IsSubAdd = Opc[0] == ISD::FADD;
8846 Opnd0 = InVec0;
8847 Opnd1 = InVec1;
8848 return true;
8849 }
8851 /// Returns true if it is possible to fold MUL and an idiom that has already been
8852 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
8853 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
8854 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
8856 /// Prior to calling this function it should be known that there is some
8857 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
8858 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
8859 /// before replacement of such SDNode with ADDSUB operation. Thus the number
8860 /// of \p Opnd0 uses is expected to be equal to 2.
8861 /// For example, this function may be called for the following IR:
8862 /// %AB = fmul fast <2 x double> %A, %B
8863 /// %Sub = fsub fast <2 x double> %AB, %C
8864 /// %Add = fadd fast <2 x double> %AB, %C
8865 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
8866 /// <2 x i32> <i32 0, i32 3>
8867 /// There is a def for %Addsub here, which potentially can be replaced by
8868 /// X86ISD::ADDSUB operation:
8869 /// %Addsub = X86ISD::ADDSUB %AB, %C
8870 /// and such ADDSUB can further be replaced with FMADDSUB:
8871 /// %Addsub = FMADDSUB %A, %B, %C.
8873 /// The main reason why this method is called before the replacement of the
8874 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
8875 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
8876 /// FMADDSUB is.
8877 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
8878 SelectionDAG &DAG,
8879 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
8880 unsigned ExpectedUses) {
8881 if (Opnd0.getOpcode() != ISD::FMUL ||
8882 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
8883 return false;
8885 // FIXME: These checks must match the similar ones in
8886 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
8887 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
8888 // or MUL + ADDSUB to FMADDSUB.
8889 const TargetOptions &Options = DAG.getTarget().Options;
8890 bool AllowFusion =
8891 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
8892 if (!AllowFusion)
8893 return false;
8895 Opnd2 = Opnd1;
8896 Opnd1 = Opnd0.getOperand(1);
8897 Opnd0 = Opnd0.getOperand(0);
8899 return true;
8900 }
8902 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
8903 /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
8904 /// X86ISD::FMSUBADD node.
8905 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
8906 const X86Subtarget &Subtarget,
8907 SelectionDAG &DAG) {
8908 SDValue Opnd0, Opnd1;
8909 unsigned NumExtracts;
8910 bool IsSubAdd;
8911 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
8912 IsSubAdd))
8913 return SDValue();
8915 MVT VT = BV->getSimpleValueType(0);
8916 SDLoc DL(BV);
8918 // Try to generate X86ISD::FMADDSUB node here.
8919 SDValue Opnd2;
8920 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
8921 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
8922 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
8923 }
8925 // We only support ADDSUB.
8926 if (IsSubAdd)
8927 return SDValue();
8929 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
8930 // the ADDSUB idiom has been successfully recognized. There are no known
8931 // X86 targets with 512-bit ADDSUB instructions!
8932 // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
8933 // recognition!
8934 if (VT.is512BitVector())
8935 return SDValue();
8937 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
8938 }
8940 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
8941 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
8942 // Initialize outputs to known values.
8943 MVT VT = BV->getSimpleValueType(0);
8944 HOpcode = ISD::DELETED_NODE;
8945 V0 = DAG.getUNDEF(VT);
8946 V1 = DAG.getUNDEF(VT);
8948 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
8949 // half of the result is calculated independently from the 128-bit halves of
8950 // the inputs, so that makes the index-checking logic below more complicated.
8951 unsigned NumElts = VT.getVectorNumElements();
8952 unsigned GenericOpcode = ISD::DELETED_NODE;
8953 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
8954 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
8955 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
8956 for (unsigned i = 0; i != Num128BitChunks; ++i) {
8957 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
8958 // Ignore undef elements.
8959 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
8960 if (Op.isUndef())
8961 continue;
8963 // If there's an opcode mismatch, we're done.
8964 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
8965 return false;
8967 // Initialize horizontal opcode.
8968 if (HOpcode == ISD::DELETED_NODE) {
8969 GenericOpcode = Op.getOpcode();
8970 switch (GenericOpcode) {
8971 case ISD::ADD: HOpcode = X86ISD::HADD; break;
8972 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
8973 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
8974 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
8975 default: return false;
8976 }
8977 }
8979 SDValue Op0 = Op.getOperand(0);
8980 SDValue Op1 = Op.getOperand(1);
8981 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8982 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8983 Op0.getOperand(0) != Op1.getOperand(0) ||
8984 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8985 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8986 return false;
8988 // The source vector is chosen based on which 64-bit half of the
8989 // destination vector is being calculated.
8990 if (j < NumEltsIn64Bits) {
8991 if (V0.isUndef())
8992 V0 = Op0.getOperand(0);
8993 } else {
8994 if (V1.isUndef())
8995 V1 = Op0.getOperand(0);
8996 }
8998 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8999 if (SourceVec != Op0.getOperand(0))
9000 return false;
9002 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
9003 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
9004 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
9005 unsigned ExpectedIndex = i * NumEltsIn128Bits +
9006 (j % NumEltsIn64Bits) * 2;
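// e.g. for a v8i32 HADD, result element 2 (chunk i=0, j=2) must be
// (add (extract_vector_elt V1, 0), (extract_vector_elt V1, 1)).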
9007 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
9008 continue;
9010 // If this is not a commutative op, this does not match.
9011 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
9012 return false;
9014 // Addition is commutative, so try swapping the extract indexes.
9015 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
9016 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
9017 continue;
9019 // Extract indexes do not match horizontal requirement.
9020 return false;
9021 }
9022 }
9023 // We matched. Opcode and operands are returned by reference as arguments.
9024 return true;
9025 }
9027 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9028 SelectionDAG &DAG, unsigned HOpcode,
9029 SDValue V0, SDValue V1) {
9030 // If either input vector is not the same size as the build vector,
9031 // extract/insert the low bits to the correct size.
9032 // This is free (examples: zmm --> xmm, xmm --> ymm).
9033 MVT VT = BV->getSimpleValueType(0);
9034 unsigned Width = VT.getSizeInBits();
9035 if (V0.getValueSizeInBits() > Width)
9036 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9037 else if (V0.getValueSizeInBits() < Width)
9038 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9040 if (V1.getValueSizeInBits() > Width)
9041 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9042 else if (V1.getValueSizeInBits() < Width)
9043 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9045 unsigned NumElts = VT.getVectorNumElements();
9046 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9047 for (unsigned i = 0; i != NumElts; ++i)
9048 if (BV->getOperand(i).isUndef())
9049 DemandedElts.clearBit(i);
9051 // If we don't need the upper xmm, then perform this as an xmm hop.
9052 unsigned HalfNumElts = NumElts / 2;
9053 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9054 MVT HalfVT = VT.getHalfNumVectorElementsVT();
9055 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9056 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9057 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9058 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9059 }
9061 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9062 }
9064 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9065 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9066 const X86Subtarget &Subtarget,
9067 SelectionDAG &DAG) {
9068 // We need at least 2 non-undef elements to make this worthwhile by default.
9069 unsigned NumNonUndefs =
9070 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9071 if (NumNonUndefs < 2)
9072 return SDValue();
9074 // There are 4 sets of horizontal math operations distinguished by type:
9075 // int/FP at 128-bit/256-bit. Each type was introduced with a different
9076 // subtarget feature. Try to match those "native" patterns first.
9077 MVT VT = BV->getSimpleValueType(0);
9078 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9079 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9080 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9081 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9082 unsigned HOpcode;
9083 SDValue V0, V1;
9084 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9085 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9086 }
9088 // Try harder to match 256-bit ops by using extract/concat.
9089 if (!Subtarget.hasAVX() || !VT.is256BitVector())
9090 return SDValue();
9092 // Count the number of UNDEF operands in the input build_vector.
9093 unsigned NumElts = VT.getVectorNumElements();
9094 unsigned Half = NumElts / 2;
9095 unsigned NumUndefsLO = 0;
9096 unsigned NumUndefsHI = 0;
9097 for (unsigned i = 0, e = Half; i != e; ++i)
9098 if (BV->getOperand(i)->isUndef())
9099 NumUndefsLO++;
9101 for (unsigned i = Half, e = NumElts; i != e; ++i)
9102 if (BV->getOperand(i)->isUndef())
9103 NumUndefsHI++;
9105 SDLoc DL(BV);
9106 SDValue InVec0, InVec1;
9107 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9108 SDValue InVec2, InVec3;
9109 unsigned X86Opcode;
9110 bool CanFold = true;
9112 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9113 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9114 InVec3) &&
9115 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9116 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9117 X86Opcode = X86ISD::HADD;
9118 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9119 InVec1) &&
9120 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9121 InVec3) &&
9122 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9123 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9124 X86Opcode = X86ISD::HSUB;
9125 else
9126 CanFold = false;
9128 if (CanFold) {
9129 // Do not try to expand this build_vector into a pair of horizontal
9130 // add/sub if we can emit a pair of scalar add/sub.
9131 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9132 return SDValue();
9134 // Convert this build_vector into a pair of horizontal binops followed by
9135 // a concat vector. We must adjust the outputs from the partial horizontal
9136 // matching calls above to account for undefined vector halves.
9137 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9138 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9139 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9140 bool isUndefLO = NumUndefsLO == Half;
9141 bool isUndefHI = NumUndefsHI == Half;
9142 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9143 isUndefHI);
9144 }
9145 }
9147 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9148 VT == MVT::v16i16) {
9149 unsigned X86Opcode;
9150 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9151 X86Opcode = X86ISD::HADD;
9152 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9153 InVec1))
9154 X86Opcode = X86ISD::HSUB;
9155 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9156 InVec1))
9157 X86Opcode = X86ISD::FHADD;
9158 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9159 InVec1))
9160 X86Opcode = X86ISD::FHSUB;
9161 else
9162 return SDValue();
9164 // Don't try to expand this build_vector into a pair of horizontal add/sub
9165 // if we can simply emit a pair of scalar add/sub.
9166 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9167 return SDValue();
9169 // Convert this build_vector into two horizontal add/sub followed by
9170 // a concat vector.
9171 bool isUndefLO = NumUndefsLO == Half;
9172 bool isUndefHI = NumUndefsHI == Half;
9173 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9174 isUndefLO, isUndefHI);
9175 }
9177 return SDValue();
9178 }
9180 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
9181 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
9182 /// just apply the bit to the vectors.
9183 /// NOTE: It's not in our interest to start making a general purpose vectorizer
9184 /// from this, but enough scalar bit operations are created from the later
9185 /// legalization + scalarization stages to need basic support.
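/// For example, (build_vector (and a, 1), (and b, 3)) is rewritten here as
/// (and (build_vector a, b), (build_vector 1, 3)).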
9186 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9187 SelectionDAG &DAG) {
9188 SDLoc DL(Op);
9189 MVT VT = Op->getSimpleValueType(0);
9190 unsigned NumElems = VT.getVectorNumElements();
9191 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9193 // Check that all elements have the same opcode.
9194 // TODO: Should we allow UNDEFS and if so how many?
9195 unsigned Opcode = Op->getOperand(0).getOpcode();
9196 for (unsigned i = 1; i < NumElems; ++i)
9197 if (Opcode != Op->getOperand(i).getOpcode())
9198 return SDValue();
9200 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9201 bool IsShift = false;
9202 switch (Opcode) {
9203 default:
9204 return SDValue();
9205 case ISD::SHL:
9206 case ISD::SRL:
9207 case ISD::SRA:
9208 IsShift = true;
9209 break;
9210 case ISD::AND:
9211 case ISD::XOR:
9212 case ISD::OR:
9213 // Don't do this if the buildvector is a splat - we'd replace one
9214 // constant with an entire vector.
9215 if (Op->getSplatValue())
9216 return SDValue();
9217 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9218 return SDValue();
9219 break;
9220 }
9222 SmallVector<SDValue, 4> LHSElts, RHSElts;
9223 for (SDValue Elt : Op->ops()) {
9224 SDValue LHS = Elt.getOperand(0);
9225 SDValue RHS = Elt.getOperand(1);
9227 // We expect the canonicalized RHS operand to be the constant.
9228 if (!isa<ConstantSDNode>(RHS))
9229 return SDValue();
9231 // Extend shift amounts.
9232 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9233 if (!IsShift)
9234 return SDValue();
9235 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9236 }
9238 LHSElts.push_back(LHS);
9239 RHSElts.push_back(RHS);
9240 }
9242 // Limit to shifts by uniform immediates.
9243 // TODO: Only accept vXi8/vXi64 special cases?
9244 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9245 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9246 return SDValue();
9248 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9249 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9250 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9251 }
9253 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
9254 /// functionality to do this, so it's all zeros, all ones, or some derivation
9255 /// that is cheap to calculate.
9256 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9257 const X86Subtarget &Subtarget) {
9258 SDLoc DL(Op);
9259 MVT VT = Op.getSimpleValueType();
9261 // Vectors containing all zeros can be matched by pxor and xorps.
9262 if (ISD::isBuildVectorAllZeros(Op.getNode()))
9263 return Op;
9265 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9266 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9267 // vpcmpeqd on 256-bit vectors.
9268 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9269 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9270 return Op;
9272 return getOnesVector(VT, DAG, DL);
9273 }
9275 return SDValue();
9276 }
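// Illustrative example: an all-ones v4i32 constant is typically materialized
// by a compare-with-self (e.g. "pcmpeqd %xmm0, %xmm0") rather than a load.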
9278 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9279 /// from a vector of source values and a vector of extraction indices.
9280 /// The vectors might be manipulated to match the type of the permute op.
9281 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9282 SDLoc &DL, SelectionDAG &DAG,
9283 const X86Subtarget &Subtarget) {
9284 MVT ShuffleVT = VT;
9285 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9286 unsigned NumElts = VT.getVectorNumElements();
9287 unsigned SizeInBits = VT.getSizeInBits();
9289 // Adjust IndicesVec to match VT size.
9290 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9291 "Illegal variable permute mask size");
9292 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9293 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9294 NumElts * VT.getScalarSizeInBits());
9295 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9297 // Handle a SrcVec whose size doesn't match the VT size.
9298 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9299 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9300 // Handle larger SrcVec by treating it as a larger permute.
9301 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9302 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9303 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9304 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9305 Subtarget, DAG, SDLoc(IndicesVec));
9306 return extractSubVector(
9307 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9308 DAG, DL, SizeInBits);
9309 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9310 // Widen smaller SrcVec to match VT.
9311 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9312 } else
9313 return SDValue();
9314 }
9316 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9317 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9318 EVT SrcVT = Idx.getValueType();
9319 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9320 uint64_t IndexScale = 0;
9321 uint64_t IndexOffset = 0;
9323 // If we're scaling a smaller permute op, then we need to repeat the
9324 // indices, scaling and offsetting them as well.
9325 // e.g. v4i32 -> v16i8 (Scale = 4)
9326 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9327 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9328 for (uint64_t i = 0; i != Scale; ++i) {
9329 IndexScale |= Scale << (i * NumDstBits);
9330 IndexOffset |= i << (i * NumDstBits);
9331 }
9333 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9334 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9335 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9336 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9337 return Idx;
9338 };
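// Illustrative example of ScaleIndices: scaling v8i16 indices down to v16i8
// uses Scale = 2 and NumDstBits = 8, giving IndexScale = 0x0202 and
// IndexOffset = 0x0100; an i16 index of 3 becomes 3 * 0x0202 + 0x0100 =
// 0x0706, i.e. byte selectors 6 and 7 for the two bytes of element 3.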
9340 unsigned Opcode = 0;
9341 switch (VT.SimpleTy) {
9342 default:
9343 break;
9344 case MVT::v16i8:
9345 if (Subtarget.hasSSSE3())
9346 Opcode = X86ISD::PSHUFB;
9347 break;
9348 case MVT::v8i16:
9349 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9350 Opcode = X86ISD::VPERMV;
9351 else if (Subtarget.hasSSSE3()) {
9352 Opcode = X86ISD::PSHUFB;
9353 ShuffleVT = MVT::v16i8;
9354 }
9355 break;
9356 case MVT::v4f32:
9357 case MVT::v4i32:
9358 if (Subtarget.hasAVX()) {
9359 Opcode = X86ISD::VPERMILPV;
9360 ShuffleVT = MVT::v4f32;
9361 } else if (Subtarget.hasSSSE3()) {
9362 Opcode = X86ISD::PSHUFB;
9363 ShuffleVT = MVT::v16i8;
9364 }
9365 break;
9366 case MVT::v2f64:
9367 case MVT::v2i64:
9368 if (Subtarget.hasAVX()) {
9369 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9370 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9371 Opcode = X86ISD::VPERMILPV;
9372 ShuffleVT = MVT::v2f64;
9373 } else if (Subtarget.hasSSE41()) {
9374 // SSE41 can compare v2i64 - select between indices 0 and 1.
9375 return DAG.getSelectCC(
9376 DL, IndicesVec,
9377 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9378 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9379 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9380 ISD::CondCode::SETEQ);
9381 }
9382 break;
9383 case MVT::v32i8:
9384 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9385 Opcode = X86ISD::VPERMV;
9386 else if (Subtarget.hasXOP()) {
9387 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9388 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9389 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9390 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9391 return DAG.getNode(
9392 ISD::CONCAT_VECTORS, DL, VT,
9393 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9394 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9395 } else if (Subtarget.hasAVX()) {
9396 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9397 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9398 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9399 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9400 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9401 ArrayRef<SDValue> Ops) {
9402 // Permute Lo and Hi and then select based on index range.
9403 // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9404 // care about bit[7] as it's just an index vector.
9405 SDValue Idx = Ops[2];
9406 EVT VT = Idx.getValueType();
9407 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9408 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9409 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9410 ISD::CondCode::SETGT);
9411 };
9412 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9413 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9414 PSHUFBBuilder);
9415 }
9416 break;
9417 case MVT::v16i16:
9418 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9419 Opcode = X86ISD::VPERMV;
9420 else if (Subtarget.hasAVX()) {
9421 // Scale to v32i8 and perform as v32i8.
9422 IndicesVec = ScaleIndices(IndicesVec, 2);
9423 return DAG.getBitcast(
9424 VT, createVariablePermute(
9425 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9426 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9427 }
9428 break;
9429 case MVT::v8f32:
9430 case MVT::v8i32:
9431 if (Subtarget.hasAVX2())
9432 Opcode = X86ISD::VPERMV;
9433 else if (Subtarget.hasAVX()) {
9434 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9435 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9436 {0, 1, 2, 3, 0, 1, 2, 3});
9437 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9438 {4, 5, 6, 7, 4, 5, 6, 7});
9439 if (Subtarget.hasXOP())
9440 return DAG.getBitcast(
9441 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9442 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9443 // Permute Lo and Hi and then select based on index range.
9444 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9445 SDValue Res = DAG.getSelectCC(
9446 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9447 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9448 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9449 ISD::CondCode::SETGT);
9450 return DAG.getBitcast(VT, Res);
9451 }
9452 break;
9453 case MVT::v4i64:
9454 case MVT::v4f64:
9455 if (Subtarget.hasAVX512()) {
9456 if (!Subtarget.hasVLX()) {
9457 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9458 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9459 SDLoc(SrcVec));
9460 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9461 DAG, SDLoc(IndicesVec));
9462 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9463 DAG, Subtarget);
9464 return extract256BitVector(Res, 0, DAG, DL);
9465 }
9466 Opcode = X86ISD::VPERMV;
9467 } else if (Subtarget.hasAVX()) {
9468 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9469 SDValue LoLo =
9470 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9471 SDValue HiHi =
9472 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9473 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9474 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9475 if (Subtarget.hasXOP())
9476 return DAG.getBitcast(
9477 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
9478 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9479 // Permute Lo and Hi and then select based on index range.
9480 // This works as VPERMILPD only uses index bit[1] to permute elements.
9481 SDValue Res = DAG.getSelectCC(
9482 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9483 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9484 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9485 ISD::CondCode::SETGT);
9486 return DAG.getBitcast(VT, Res);
9487 }
9488 break;
9489 case MVT::v64i8:
9490 if (Subtarget.hasVBMI())
9491 Opcode = X86ISD::VPERMV;
9492 break;
9493 case MVT::v32i16:
9494 if (Subtarget.hasBWI())
9495 Opcode = X86ISD::VPERMV;
9496 break;
9497 case MVT::v16f32:
9498 case MVT::v16i32:
9499 case MVT::v8f64:
9500 case MVT::v8i64:
9501 if (Subtarget.hasAVX512())
9502 Opcode = X86ISD::VPERMV;
9503 break;
9504 }
9506 if (!Opcode)
9507 return SDValue();
9508 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9509 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9510 "Illegal variable permute shuffle type");
9512 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
9513 if (Scale > 1)
9514 IndicesVec = ScaleIndices(IndicesVec, Scale);
9516 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9517 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9519 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
9520 SDValue Res = Opcode == X86ISD::VPERMV
9521 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9522 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9523 return DAG.getBitcast(VT, Res);
9524 }
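// Illustrative example: without AVX2, a v8i32 variable permute is emitted as
//   Lo = VPERMILPV(<V[0..3], V[0..3]>, Idx)
//   Hi = VPERMILPV(<V[4..7], V[4..7]>, Idx)
//   Res[i] = (Idx[i] > 3) ? Hi[i] : Lo[i]
// since no single cross-lane variable permute exists before AVX2.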
9526 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9527 // reasoned to be a permutation of a vector by indices in a non-constant vector.
9528 // (build_vector (extract_elt V, (extract_elt I, 0)),
9529 // (extract_elt V, (extract_elt I, 1)),
9530 // ...
9531 // ->
9532 // (vpermv I, V)
9533 //
9534 // TODO: Handle undefs
9535 // TODO: Utilize pshufb and zero mask blending to support more efficient
9536 // construction of vectors with constant-0 elements.
9537 static SDValue
9538 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9539 const X86Subtarget &Subtarget) {
9540 SDValue SrcVec, IndicesVec;
9541 // Check for a match of the permute source vector and permute index elements.
9542 // This is done by checking that the i-th build_vector operand is of the form:
9543 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9544 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9545 SDValue Op = V.getOperand(Idx);
9546 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9547 return SDValue();
9549 // If this is the first extract encountered in V, set the source vector,
9550 // otherwise verify the extract is from the previously defined source
9551 // vector.
9552 if (!SrcVec)
9553 SrcVec = Op.getOperand(0);
9554 else if (SrcVec != Op.getOperand(0))
9555 return SDValue();
9556 SDValue ExtractedIndex = Op->getOperand(1);
9557 // Peek through extends.
9558 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9559 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9560 ExtractedIndex = ExtractedIndex.getOperand(0);
9561 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9562 return SDValue();
9564 // If this is the first extract from the index vector candidate, set the
9565 // indices vector, otherwise verify the extract is from the previously
9566 // defined indices vector.
9567 if (!IndicesVec)
9568 IndicesVec = ExtractedIndex.getOperand(0);
9569 else if (IndicesVec != ExtractedIndex.getOperand(0))
9570 return SDValue();
9572 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9573 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
9574 return SDValue();
9575 }
9577 SDLoc DL(V);
9578 MVT VT = V.getSimpleValueType();
9579 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9580 }
9582 SDValue
9583 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9584 SDLoc dl(Op);
9586 MVT VT = Op.getSimpleValueType();
9587 MVT EltVT = VT.getVectorElementType();
9588 unsigned NumElems = Op.getNumOperands();
9590 // Generate vectors for predicate vectors.
9591 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9592 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9594 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9595 return VectorConstant;
9597 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9598 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9599 return AddSub;
9600 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9601 return HorizontalOp;
9602 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9603 return Broadcast;
9604 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9605 return BitOp;
9607 unsigned EVTBits = EltVT.getSizeInBits();
9609 unsigned NumZero = 0;
9610 unsigned NumNonZero = 0;
9611 uint64_t NonZeros = 0;
9612 bool IsAllConstants = true;
9613 SmallSet<SDValue, 8> Values;
9614 unsigned NumConstants = NumElems;
9615 for (unsigned i = 0; i < NumElems; ++i) {
9616 SDValue Elt = Op.getOperand(i);
9617 if (Elt.isUndef())
9618 continue;
9619 Values.insert(Elt);
9620 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9621 IsAllConstants = false;
9622 NumConstants--;
9623 }
9624 if (X86::isZeroNode(Elt))
9625 NumZero++;
9626 else {
9627 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9628 NonZeros |= ((uint64_t)1 << i);
9629 NumNonZero++;
9630 }
9631 }
9633 // All undef vector. Return an UNDEF. All zero vectors were handled above.
9634 if (NumNonZero == 0)
9635 return DAG.getUNDEF(VT);
9637 // If we are inserting one variable into a vector of non-zero constants, try
9638 // to avoid loading each constant element as a scalar. Load the constants as a
9639 // vector and then insert the variable scalar element. If insertion is not
9640 // supported, fall back to a shuffle to get the scalar blended with the
9641 // constants. Insertion into a zero vector is handled as a special-case
9642 // somewhere below here.
9643 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9644 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9645 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9646 // Create an all-constant vector. The variable element in the old
9647 // build vector is replaced by undef in the constant vector. Save the
9648 // variable scalar element and its index for use in the insertelement.
9649 LLVMContext &Context = *DAG.getContext();
9650 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9651 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9652 SDValue VarElt;
9653 SDValue InsIndex;
9654 for (unsigned i = 0; i != NumElems; ++i) {
9655 SDValue Elt = Op.getOperand(i);
9656 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9657 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9658 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9659 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9660 else if (!Elt.isUndef()) {
9661 assert(!VarElt.getNode() && !InsIndex.getNode() &&
9662 "Expected one variable element in this vector");
9663 VarElt = Elt;
9664 InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9665 }
9666 }
9667 Constant *CV = ConstantVector::get(ConstVecOps);
9668 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9670 // The constants we just created may not be legal (eg, floating point). We
9671 // must lower the vector right here because we can not guarantee that we'll
9672 // legalize it before loading it. This is also why we could not just create
9673 // a new build vector here. If the build vector contains illegal constants,
9674 // it could get split back up into a series of insert elements.
9675 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9676 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9677 MachineFunction &MF = DAG.getMachineFunction();
9678 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9679 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9680 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9681 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9682 if (InsertC < NumEltsInLow128Bits)
9683 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9685 // There's no good way to insert into the high elements of a >128-bit
9686 // vector, so use shuffles to avoid an extract/insert sequence.
9687 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9688 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9689 SmallVector<int, 8> ShuffleMask;
9690 unsigned NumElts = VT.getVectorNumElements();
9691 for (unsigned i = 0; i != NumElts; ++i)
9692 ShuffleMask.push_back(i == InsertC ? NumElts : i);
9693 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9694 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9695 }
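// Illustrative example: v4f32 <5.0, %x, 7.0, 8.0> becomes a constant-pool
// load of <5.0, undef, 7.0, 8.0> followed by inserting %x at index 1 (or by
// the shuffle above when the index lands in the high half of a wider vector).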
9697 // Special case for single non-zero, non-undef, element.
9698 if (NumNonZero == 1) {
9699 unsigned Idx = countTrailingZeros(NonZeros);
9700 SDValue Item = Op.getOperand(Idx);
9702 // If we have a constant or non-constant insertion into the low element of
9703 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9704 // the rest of the elements. This will be matched as movd/movq/movss/movsd
9705 // depending on what the source datatype is.
9706 if (Idx == 0) {
9707 if (NumZero == 0)
9708 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9710 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9711 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9712 assert((VT.is128BitVector() || VT.is256BitVector() ||
9713 VT.is512BitVector()) &&
9714 "Expected an SSE value type!");
9715 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9716 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9717 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9718 }
9720 // We can't directly insert an i8 or i16 into a vector, so zero extend
9721 // it to i32 first.
9722 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9723 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9724 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9725 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9726 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9727 return DAG.getBitcast(VT, Item);
9728 }
9729 }
9731 // Is it a vector logical left shift?
9732 if (NumElems == 2 && Idx == 1 &&
9733 X86::isZeroNode(Op.getOperand(0)) &&
9734 !X86::isZeroNode(Op.getOperand(1))) {
9735 unsigned NumBits = VT.getSizeInBits();
9736 return getVShift(true, VT,
9737 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
9738 VT, Op.getOperand(1)),
9739 NumBits/2, DAG, *this, dl);
9740 }
9742 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
9743 return SDValue();
9745 // Otherwise, if this is a vector with i32 or f32 elements, and the element
9746 // is a non-constant being inserted into an element other than the low one,
9747 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
9748 // movd/movss) to move this into the low element, then shuffle it into
9749 // place.
9750 if (EVTBits == 32) {
9751 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9752 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
9753 }
9754 }
9756 // Splat is obviously ok. Let legalizer expand it to a shuffle.
9757 if (Values.size() == 1) {
9758 if (EVTBits == 32) {
9759 // Instead of a shuffle like this:
9760 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
9761 // Check if it's possible to issue this instead.
9762 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
9763 unsigned Idx = countTrailingZeros(NonZeros);
9764 SDValue Item = Op.getOperand(Idx);
9765 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
9766 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
9767 }
9768 return SDValue();
9769 }
9771 // A vector full of immediates; various special cases are already
9772 // handled, so this is best done with a single constant-pool load.
9773 if (IsAllConstants)
9774 return SDValue();
9776 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
9777 return V;
9779 // See if we can use a vector load to get all of the elements.
9780 {
9781 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
9782 if (SDValue LD =
9783 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
9784 return LD;
9785 }
9787 // If this is a splat of pairs of 32-bit elements, we can use a narrower
9788 // build_vector and broadcast it.
9789 // TODO: We could probably generalize this more.
9790 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
9791 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9792 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9793 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
9794 // Make sure all the even/odd operands match.
9795 for (unsigned i = 2; i != NumElems; ++i)
9796 if (Ops[i % 2] != Op.getOperand(i))
9797 return false;
9798 return true;
9799 };
9800 if (CanSplat(Op, NumElems, Ops)) {
9801 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
9802 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
9803 // Create a new build vector and cast to v2i64/v2f64.
9804 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
9805 DAG.getBuildVector(NarrowVT, dl, Ops));
9806 // Broadcast from v2i64/v2f64 and cast to final VT.
9807 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
9808 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
9809 NewBV));
9810 }
9811 }
9813 // For AVX-length vectors, build the individual 128-bit pieces and use
9814 // shuffles to put them in place.
9815 if (VT.getSizeInBits() > 128) {
9816 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
9818 // Build both the lower and upper subvector.
9819 SDValue Lower =
9820 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
9821 SDValue Upper = DAG.getBuildVector(
9822 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
9824 // Recreate the wider vector with the lower and upper part.
9825 return concatSubVectors(Lower, Upper, DAG, dl);
9826 }
9828 // Let legalizer expand 2-wide build_vectors.
9829 if (EVTBits == 64) {
9830 if (NumNonZero == 1) {
9831 // One half is zero or undef.
9832 unsigned Idx = countTrailingZeros(NonZeros);
9833 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
9834 Op.getOperand(Idx));
9835 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
9836 }
9837 return SDValue();
9838 }
9840 // If element VT is < 32 bits, convert it to inserts into a zero vector.
9841 if (EVTBits == 8 && NumElems == 16)
9842 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
9843 DAG, Subtarget))
9844 return V;
9846 if (EVTBits == 16 && NumElems == 8)
9847 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
9848 DAG, Subtarget))
9849 return V;
9851 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
9852 if (EVTBits == 32 && NumElems == 4)
9853 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
9854 return V;
9856 // If element VT is == 32 bits, turn it into a number of shuffles.
9857 if (NumElems == 4 && NumZero > 0) {
9858 SmallVector<SDValue, 8> Ops(NumElems);
9859 for (unsigned i = 0; i < 4; ++i) {
9860 bool isZero = !(NonZeros & (1ULL << i));
9861 if (isZero)
9862 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
9863 else
9864 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9865 }
9867 for (unsigned i = 0; i < 2; ++i) {
9868 switch ((NonZeros >> (i*2)) & 0x3) {
9869 default: llvm_unreachable("Unexpected NonZero count");
9870 case 0:
9871 Ops[i] = Ops[i*2]; // Must be a zero vector.
9872 break;
9873 case 1:
9874 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
9875 break;
9876 case 2:
9877 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9878 break;
9879 case 3:
9880 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9881 break;
9882 }
9883 }
9885 bool Reverse1 = (NonZeros & 0x3) == 2;
9886 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
9887 int MaskVec[] = {
9888 Reverse1 ? 1 : 0,
9889 Reverse1 ? 0 : 1,
9890 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9891 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
9892 };
9893 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9894 }
9896 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9898 // Check for a build vector from mostly shuffle plus few inserting.
9899 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9900 return Sh;
9902 // For SSE 4.1, use insertps to put the high elements into the low element.
9903 if (Subtarget.hasSSE41()) {
9904 SDValue Result;
9905 if (!Op.getOperand(0).isUndef())
9906 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9907 else
9908 Result = DAG.getUNDEF(VT);
9910 for (unsigned i = 1; i < NumElems; ++i) {
9911 if (Op.getOperand(i).isUndef()) continue;
9912 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9913 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9914 }
9915 return Result;
9916 }
9918 // Otherwise, expand into a number of unpckl*, start by extending each of
9919 // our (non-undef) elements to the full vector width with the element in the
9920 // bottom slot of the vector (which generates no code for SSE).
9921 SmallVector<SDValue, 8> Ops(NumElems);
9922 for (unsigned i = 0; i < NumElems; ++i) {
9923 if (!Op.getOperand(i).isUndef())
9924 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9925 else
9926 Ops[i] = DAG.getUNDEF(VT);
9927 }
9929 // Next, we iteratively mix elements, e.g. for v4f32:
9930 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9931 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9932 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
9933 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9934 // Generate scaled UNPCKL shuffle mask.
9935 SmallVector<int, 16> Mask;
9936 for (unsigned i = 0; i != Scale; ++i)
9937 Mask.push_back(i);
9938 for (unsigned i = 0; i != Scale; ++i)
9939 Mask.push_back(NumElems+i);
9940 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9942 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9943 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9944 }
9946 return Ops[0];
9947 }
9948 // 256-bit AVX can use the vinsertf128 instruction
9949 // to create 256-bit vectors from two other 128-bit ones.
9950 // TODO: Detect subvector broadcast here instead of DAG combine?
9951 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9952 const X86Subtarget &Subtarget) {
9953 SDLoc dl(Op);
9954 MVT ResVT = Op.getSimpleValueType();
9956 assert((ResVT.is256BitVector() ||
9957 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9959 unsigned NumOperands = Op.getNumOperands();
9960 unsigned NumZero = 0;
9961 unsigned NumNonZero = 0;
9962 unsigned NonZeros = 0;
9963 for (unsigned i = 0; i != NumOperands; ++i) {
9964 SDValue SubVec = Op.getOperand(i);
9965 if (SubVec.isUndef())
9966 continue;
9967 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9968 ++NumZero;
9969 else {
9970 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9971 NonZeros |= 1 << i;
9972 ++NumNonZero;
9973 }
9974 }
9976 // If we have more than 2 non-zeros, build each half separately.
9977 if (NumNonZero > 2) {
9978 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9979 ArrayRef<SDUse> Ops = Op->ops();
9980 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9981 Ops.slice(0, NumOperands/2));
9982 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9983 Ops.slice(NumOperands/2));
9984 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9985 }
9987 // Otherwise, build it up through insert_subvectors.
9988 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9989 : DAG.getUNDEF(ResVT);
9991 MVT SubVT = Op.getOperand(0).getSimpleValueType();
9992 unsigned NumSubElems = SubVT.getVectorNumElements();
9993 for (unsigned i = 0; i != NumOperands; ++i) {
9994 if ((NonZeros & (1 << i)) == 0)
9995 continue;
9997 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9998 Op.getOperand(i),
9999 DAG.getIntPtrConstant(i * NumSubElems, dl));
10000 }
10002 return Vec;
10003 }
10005 // Returns true if the given node is a type promotion (by concatenating i1
10006 // zeros) of the result of a node that already zeros all upper bits of
10007 // a k-register.
10008 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
10009 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
10010 const X86Subtarget &Subtarget,
10011 SelectionDAG & DAG) {
10012 SDLoc dl(Op);
10013 MVT ResVT = Op.getSimpleValueType();
10014 unsigned NumOperands = Op.getNumOperands();
10016 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
10017 "Unexpected number of operands in CONCAT_VECTORS");
10019 uint64_t Zeros = 0;
10020 uint64_t NonZeros = 0;
10021 for (unsigned i = 0; i != NumOperands; ++i) {
10022 SDValue SubVec = Op.getOperand(i);
10023 if (SubVec.isUndef())
10024 continue;
10025 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10026 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10027 Zeros |= (uint64_t)1 << i;
10028 else
10029 NonZeros |= (uint64_t)1 << i;
10030 }
10032 unsigned NumElems = ResVT.getVectorNumElements();
10034 // If we are inserting non-zero vector and there are zeros in LSBs and undef
10035 // in the MSBs we need to emit a KSHIFTL. The generic lowering to
10036 // insert_subvector will give us two kshifts.
10037 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10038 Log2_64(NonZeros) != NumOperands - 1) {
10039 MVT ShiftVT = ResVT;
10040 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10041 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10042 unsigned Idx = Log2_64(NonZeros);
10043 SDValue SubVec = Op.getOperand(Idx);
10044 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10045 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10046 DAG.getUNDEF(ShiftVT), SubVec,
10047 DAG.getIntPtrConstant(0, dl));
10048 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10049 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10050 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10051 DAG.getIntPtrConstant(0, dl));
10052 }
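// Illustrative example: concat_vectors(zero, %k, undef, undef) of v2i1
// operands places %k at bit offset 2, so it becomes a single KSHIFTL of the
// widened %k by 2 instead of two kshifts through insert_subvector.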
10054 // If there are zero or one non-zeros we can handle this very simply.
10055 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10056 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10057 if (!NonZeros)
10058 return Vec;
10059 unsigned Idx = Log2_64(NonZeros);
10060 SDValue SubVec = Op.getOperand(Idx);
10061 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10062 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10063 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10066 if (NumOperands > 2) {
10067 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10068 ArrayRef<SDUse> Ops = Op->ops();
10069 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10070 Ops.slice(0, NumOperands/2));
10071 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10072 Ops.slice(NumOperands/2));
10073 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10076 assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10078 if (ResVT.getVectorNumElements() >= 16)
10079 return Op; // The operation is legal with KUNPCK
10081 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10082 DAG.getUNDEF(ResVT), Op.getOperand(0),
10083 DAG.getIntPtrConstant(0, dl));
10084 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10085 DAG.getIntPtrConstant(NumElems/2, dl));
10088 static SDValue LowerCONCAT_VECTORS(SDValue Op,
10089 const X86Subtarget &Subtarget,
10090 SelectionDAG &DAG) {
10091 MVT VT = Op.getSimpleValueType();
10092 if (VT.getVectorElementType() == MVT::i1)
10093 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10095 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10096 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10097 Op.getNumOperands() == 4)));
10099 // AVX can use the vinsertf128 instruction to create 256-bit vectors
10100 // from two other 128-bit ones.
10102 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
10103 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10106 //===----------------------------------------------------------------------===//
10107 // Vector shuffle lowering
10109 // This is an experimental code path for lowering vector shuffles on x86. It is
10110 // designed to handle arbitrary vector shuffles and blends, gracefully
10111 // degrading performance as necessary. It works hard to recognize idiomatic
10112 // shuffles and lower them to optimal instruction patterns without leaving
10113 // a framework that allows reasonably efficient handling of all vector shuffle
10114 // patterns.
10115 //===----------------------------------------------------------------------===//
10117 /// Tiny helper function to identify a no-op mask.
10119 /// This is a somewhat boring predicate function. It checks whether the mask
10120 /// array input, which is assumed to be a single-input shuffle mask of the kind
10121 /// used by the X86 shuffle instructions (not a fully general
10122 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
10123 /// in-place shuffle are 'no-op's.
10124 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10125 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10126 assert(Mask[i] >= -1 && "Out of bound mask element!");
10127 if (Mask[i] >= 0 && Mask[i] != i)
10128 return false;
10129 }
10131 return true;
10132 }
10133 /// Test whether there are elements crossing 128-bit lanes in this
10134 /// shuffle mask.
10136 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10137 /// and we routinely test for these.
10138 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10139 int LaneSize = 128 / VT.getScalarSizeInBits();
10140 int Size = Mask.size();
10141 for (int i = 0; i < Size; ++i)
10142 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10143 return true;
10144 return false;
10145 }
10147 /// Test whether a shuffle mask is equivalent within each sub-lane.
10149 /// This checks a shuffle mask to see if it is performing the same
10150 /// lane-relative shuffle in each sub-lane. This trivially implies
10151 /// that it is also not lane-crossing. It may however involve a blend from the
10152 /// same lane of a second vector.
10154 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10155 /// non-trivial to compute in the face of undef lanes. The representation is
10156 /// suitable for use with existing 128-bit shuffles as entries from the second
10157 /// vector have been remapped to [LaneSize, 2*LaneSize).
10158 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10159 ArrayRef<int> Mask,
10160 SmallVectorImpl<int> &RepeatedMask) {
10161 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10162 RepeatedMask.assign(LaneSize, -1);
10163 int Size = Mask.size();
10164 for (int i = 0; i < Size; ++i) {
10165 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10166 if (Mask[i] < 0)
10167 continue;
10168 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10169 // This entry crosses lanes, so there is no way to model this shuffle.
10170 return false;
10172 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10173 // Adjust second vector indices to start at LaneSize instead of Size.
10174 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10175 : Mask[i] % LaneSize + LaneSize;
10176 if (RepeatedMask[i % LaneSize] < 0)
10177 // This is the first non-undef entry in this slot of a 128-bit lane.
10178 RepeatedMask[i % LaneSize] = LocalM;
10179 else if (RepeatedMask[i % LaneSize] != LocalM)
10180 // Found a mismatch with the repeated mask.
10181 return false;
10182 }
10184 return true;
10185 }
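// Illustrative example: the v8f32 mask <0,9,2,11,4,13,6,15> repeats
// <0,5,2,7> in every 128-bit lane (second-input indices remapped into
// [LaneSize, 2*LaneSize)), so it is 128-bit lane-repeated.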
10186 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
10187 static bool
10188 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10189 SmallVectorImpl<int> &RepeatedMask) {
10190 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10191 }
10193 static bool
10194 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10195 SmallVector<int, 32> RepeatedMask;
10196 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10197 }
10199 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
10200 static bool
10201 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10202 SmallVectorImpl<int> &RepeatedMask) {
10203 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10204 }
10206 /// Test whether a target shuffle mask is equivalent within each sub-lane.
10207 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10208 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10209 ArrayRef<int> Mask,
10210 SmallVectorImpl<int> &RepeatedMask) {
10211 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10212 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10213 int Size = Mask.size();
10214 for (int i = 0; i < Size; ++i) {
10215 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10216 if (Mask[i] == SM_SentinelUndef)
10217 continue;
10218 if (Mask[i] == SM_SentinelZero) {
10219 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10220 return false;
10221 RepeatedMask[i % LaneSize] = SM_SentinelZero;
10222 continue;
10223 }
10224 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10225 // This entry crosses lanes, so there is no way to model this shuffle.
10226 return false;
10228 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10229 // Adjust second vector indices to start at LaneSize instead of Size.
10230 int LocalM =
10231 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10232 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10233 // This is the first non-undef entry in this slot of a 128-bit lane.
10234 RepeatedMask[i % LaneSize] = LocalM;
10235 else if (RepeatedMask[i % LaneSize] != LocalM)
10236 // Found a mismatch with the repeated mask.
10237 return false;
10238 }
10240 return true;
10241 }
10242 /// Checks whether a shuffle mask is equivalent to an explicit list of
10245 /// This is a fast way to test a shuffle mask against a fixed pattern:
10247 /// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
10249 /// It returns true if the mask is exactly as wide as the argument list, and
10250 /// each element of the mask is either -1 (signifying undef) or the value given
10251 /// in the argument.
10252 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10253 ArrayRef<int> ExpectedMask) {
10254 if (Mask.size() != ExpectedMask.size())
10255 return false;
10257 int Size = Mask.size();
10259 // If the values are build vectors, we can look through them to find
10260 // equivalent inputs that make the shuffles equivalent.
10261 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10262 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10264 for (int i = 0; i < Size; ++i) {
10265 assert(Mask[i] >= -1 && "Out of bound mask element!");
10266 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10267 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10268 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10269 if (!MaskBV || !ExpectedBV ||
10270 MaskBV->getOperand(Mask[i] % Size) !=
10271 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10272 return false;
10273 }
10274 }
10276 return true;
10277 }
10279 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10281 /// The masks must be exactly the same width.
10283 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10284 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
10286 /// SM_SentinelZero is accepted as a valid negative index but must match in
10287 /// both.
10288 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10289 ArrayRef<int> ExpectedMask,
10290 SDValue V1 = SDValue(),
10291 SDValue V2 = SDValue()) {
10292 int Size = Mask.size();
10293 if (Size != (int)ExpectedMask.size())
10294 return false;
10295 assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10296 "Illegal target shuffle mask");
10298 // Check for out-of-range target shuffle mask indices.
10299 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10300 return false;
10302 // If the values are build vectors, we can look through them to find
10303 // equivalent inputs that make the shuffles equivalent.
10304 auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10305 auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10306 BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10307 BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10309 for (int i = 0; i < Size; ++i) {
10310 if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10311 continue;
10312 if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10313 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10314 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10315 if (MaskBV && ExpectedBV &&
10316 MaskBV->getOperand(Mask[i] % Size) ==
10317 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10318 continue;
10319 }
10320 // TODO - handle SM_Sentinel equivalences.
10321 return false;
10322 }
10324 return true;
10325 }
10326 // Attempt to create a shuffle mask from a VSELECT condition mask.
10327 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10328 SDValue Cond) {
10329 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10330 return false;
10332 unsigned Size = Cond.getValueType().getVectorNumElements();
10333 Mask.resize(Size, SM_SentinelUndef);
10335 for (int i = 0; i != (int)Size; ++i) {
10336 SDValue CondElt = Cond.getOperand(i);
10337 Mask[i] = i;
10338 // Arbitrarily choose from the 2nd operand if the select condition element
10339 // is undef.
10340 // TODO: Can we do better by matching patterns such as even/odd?
10341 if (CondElt.isUndef() || isNullConstant(CondElt))
10342 Mask[i] += Size;
10343 }
10345 return true;
10346 }
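// Illustrative example: a v4i32 VSELECT condition of <-1, 0, -1, 0> yields
// the shuffle mask <0, 5, 2, 7>: true lanes pick from the first operand,
// false or undef lanes from the second.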
10348 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
10349 // instructions.
10350 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10351 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10352 return false;
10354 SmallVector<int, 8> Unpcklwd;
10355 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10356 /* Unary = */ false);
10357 SmallVector<int, 8> Unpckhwd;
10358 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10359 /* Unary = */ false);
10360 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10361 isTargetShuffleEquivalent(Mask, Unpckhwd));
10362 return IsUnpackwdMask;
10363 }
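// Illustrative example: the binary v8i16 unpcklwd mask is
// <0, 8, 1, 9, 2, 10, 3, 11> and unpckhwd is <4, 12, 5, 13, 6, 14, 7, 15>.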
10365 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10366 // Create 128-bit vector type based on mask size.
10367 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10368 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10370 // We can't assume a canonical shuffle mask, so try the commuted version too.
10371 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10372 ShuffleVectorSDNode::commuteMask(CommutedMask);
10374 // Match any of unary/binary or low/high.
10375 for (unsigned i = 0; i != 4; ++i) {
10376 SmallVector<int, 16> UnpackMask;
10377 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10378 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10379 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10380 return true;
10381 }
10382 return false;
10383 }
10385 /// Return true if a shuffle mask chooses elements identically in its top and
10386 /// bottom halves. For example, any splat mask has the same top and bottom
10387 /// halves. If an element is undefined in only one half of the mask, the halves
10388 /// are not considered identical.
10389 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10390 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10391 unsigned HalfSize = Mask.size() / 2;
10392 for (unsigned i = 0; i != HalfSize; ++i) {
10393 if (Mask[i] != Mask[i + HalfSize])
10394 return false;
10395 }
10396 return true;
10397 }
10399 /// Get a 4-lane 8-bit shuffle immediate for a mask.
10401 /// This helper function produces an 8-bit shuffle immediate corresponding to
10402 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
10403 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for
10404 /// example.
10406 /// NB: We rely heavily on "undef" masks preserving the input lane.
10407 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10408 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10409 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10410 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10411 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10412 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10414 unsigned Imm = 0;
10415 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10416 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10417 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10418 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10419 return Imm;
10420 }
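// Illustrative example: Mask = <3, 1, 0, 2> encodes as
// 3 | (1 << 2) | (0 << 4) | (2 << 6) = 0x87.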
10422 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10423 SelectionDAG &DAG) {
10424 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10427 /// Compute whether each element of a shuffle is zeroable.
10429 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
10430 /// Either it is an undef element in the shuffle mask, the element of the input
10431 /// referenced is undef, or the element of the input referenced is known to be
10432 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
10433 /// as many lanes with this technique as possible to simplify the remaining
10434 /// shuffle.
10435 static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
10436 SDValue V1, SDValue V2) {
10437 APInt Zeroable(Mask.size(), 0);
10438 V1 = peekThroughBitcasts(V1);
10439 V2 = peekThroughBitcasts(V2);
10441 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
10442 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
10444 int VectorSizeInBits = V1.getValueSizeInBits();
10445 int ScalarSizeInBits = VectorSizeInBits / Mask.size();
10446 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
10448 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10449 int M = Mask[i];
10450 // Handle the easy cases.
10451 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
10452 Zeroable.setBit(i);
10453 continue;
10454 }
10456 // Determine shuffle input and normalize the mask.
10457 SDValue V = M < Size ? V1 : V2;
10458 M %= Size;
10460 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
10461 if (V.getOpcode() != ISD::BUILD_VECTOR)
10462 continue;
10464 // If the BUILD_VECTOR has fewer elements then the bitcasted portion of
10465 // the (larger) source element must be UNDEF/ZERO.
10466 if ((Size % V.getNumOperands()) == 0) {
10467 int Scale = Size / V->getNumOperands();
10468 SDValue Op = V.getOperand(M / Scale);
10469 if (Op.isUndef() || X86::isZeroNode(Op))
10470 Zeroable.setBit(i);
10471 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
10472 APInt Val = Cst->getAPIntValue();
10473 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10474 Val = Val.getLoBits(ScalarSizeInBits);
10475 if (Val == 0)
10476 Zeroable.setBit(i);
10477 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
10478 APInt Val = Cst->getValueAPF().bitcastToAPInt();
10479 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10480 Val = Val.getLoBits(ScalarSizeInBits);
10481 if (Val == 0)
10482 Zeroable.setBit(i);
10483 }
10484 continue;
10485 }
10487 // If the BUILD_VECTOR has more elements then all the (smaller) source
10488 // elements must be UNDEF or ZERO.
10489 if ((V.getNumOperands() % Size) == 0) {
10490 int Scale = V->getNumOperands() / Size;
10491 bool AllZeroable = true;
10492 for (int j = 0; j < Scale; ++j) {
10493 SDValue Op = V.getOperand((M * Scale) + j);
10494 AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
10495 }
10496 if (AllZeroable)
10497 Zeroable.setBit(i);
10498 }
10499 }
10501 return Zeroable;
10502 }
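// Illustrative example: if V2 is an all-zeros BUILD_VECTOR, every mask
// element that selects from V2 (Mask[i] >= Size) is zeroable.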
10505 // The shuffle result is as follows:
10506 // 0*a[0]0*a[1]...0*a[n], n >= 0, where the a[] elements are in ascending order.
10507 // Each element of Zeroable corresponds to a particular Mask element, as
10508 // described in the computeZeroableShuffleElements function.
10510 // The function looks for a sub-mask whose non-zero elements are in
10511 // increasing order. If such a sub-mask exists, the function returns true.
10512 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10513 ArrayRef<int> Mask, const EVT &VectorType,
10514 bool &IsZeroSideLeft) {
10515 int NextElement = -1;
10516 // Check if the Mask's nonzero elements are in increasing order.
10517 for (int i = 0, e = Mask.size(); i < e; i++) {
10518 // Check that the mask's zero elements are built from only zeros.
10519 assert(Mask[i] >= -1 && "Out of bound mask element!");
10520 if (Mask[i] < 0)
10521 return false;
10522 if (Zeroable[i])
10523 continue;
10524 // Find the lowest non-zero element.
10525 if (NextElement < 0) {
10526 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10527 IsZeroSideLeft = NextElement != 0;
10528 }
10529 // Exit if the mask's non-zero elements are not in increasing order.
10530 if (NextElement != Mask[i])
10531 return false;
10532 NextElement++;
10533 }
10535 return true;
10536 }
10537 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
10538 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10539 ArrayRef<int> Mask, SDValue V1,
10540 SDValue V2, const APInt &Zeroable,
10541 const X86Subtarget &Subtarget,
10542 SelectionDAG &DAG) {
10543 int Size = Mask.size();
10544 int LaneSize = 128 / VT.getScalarSizeInBits();
10545 const int NumBytes = VT.getSizeInBits() / 8;
10546 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10548 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10549 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10550 (Subtarget.hasBWI() && VT.is512BitVector()));
10552 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10553 // Sign bit set in i8 mask means zero element.
10554 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10556 SDValue V;
10557 for (int i = 0; i < NumBytes; ++i) {
10558 int M = Mask[i / NumEltBytes];
10559 if (M < 0) {
10560 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10561 continue;
10562 }
10563 if (Zeroable[i / NumEltBytes]) {
10564 PSHUFBMask[i] = ZeroMask;
10565 continue;
10566 }
10568 // We can only use a single input of V1 or V2.
10569 SDValue SrcV = (M >= Size ? V2 : V1);
10570 if (V && V != SrcV)
10571 return SDValue();
10572 V = SrcV;
10573 M %= Size;
10575 // PSHUFB can't cross lanes, ensure this doesn't happen.
10576 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10577 return SDValue();
10579 M = M % LaneSize;
10580 M = M * NumEltBytes + (i % NumEltBytes);
10581 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10582 }
10583 assert(V && "Failed to find a source input");
10585 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10586 return DAG.getBitcast(
10587 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10588 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
10589 }
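// Illustrative example: for a v8i16 shuffle, mask element M = 4 in i16 slot
// 2 expands to byte selectors 8 and 9 (M * NumEltBytes + 0/1) in bytes 4-5
// of the PSHUFB control vector.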
10591 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10592 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10593 const SDLoc &dl);
10595 // X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
10596 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10597 const APInt &Zeroable,
10598 ArrayRef<int> Mask, SDValue &V1,
10599 SDValue &V2, SelectionDAG &DAG,
10600 const X86Subtarget &Subtarget) {
10601 bool IsLeftZeroSide = true;
10602 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10603 IsLeftZeroSide))
10604 return SDValue();
10605 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10606 MVT IntegerType =
10607 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10608 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10609 unsigned NumElts = VT.getVectorNumElements();
10610 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10611 "Unexpected number of vector elements");
10612 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10613 Subtarget, DAG, DL);
10614 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10615 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10616 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10617 }
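// Illustrative example: a v4f32 shuffle producing <0, V[0], 0, V[1]> has
// Zeroable = 0b0101, so VEXPANDMask = 0b1010 and VEXPAND scatters the
// consecutive elements of V into lanes 1 and 3 around a zero vector.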
10619 static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10620 unsigned &UnpackOpcode, bool IsUnary,
10621 ArrayRef<int> TargetMask,
10622 const SDLoc &DL, SelectionDAG &DAG,
10623 const X86Subtarget &Subtarget) {
10624 int NumElts = VT.getVectorNumElements();
10626 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10627 for (int i = 0; i != NumElts; i += 2) {
10628 int M1 = TargetMask[i + 0];
10629 int M2 = TargetMask[i + 1];
10630 Undef1 &= (SM_SentinelUndef == M1);
10631 Undef2 &= (SM_SentinelUndef == M2);
10632 Zero1 &= isUndefOrZero(M1);
10633 Zero2 &= isUndefOrZero(M2);
10634 }
10635 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10636 "Zeroable shuffle detected");
10638 // Attempt to match the target mask against the unpack lo/hi mask patterns.
10639 SmallVector<int, 64> Unpckl, Unpckh;
10640 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10641 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10642 UnpackOpcode = X86ISD::UNPCKL;
10643 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10644 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10645 return true;
10646 }
10648 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10649 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10650 UnpackOpcode = X86ISD::UNPCKH;
10651 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10652 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10653 return true;
10654 }
10656 // If an unary shuffle, attempt to match as an unpack lo/hi with zero.
10657 if (IsUnary && (Zero1 || Zero2)) {
10658 // Don't bother if we can blend instead.
10659 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10660 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10661 return false;
10663 bool MatchLo = true, MatchHi = true;
10664 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10665 int M = TargetMask[i];
10667 // Ignore if the input is known to be zero or the index is undef.
10668 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10669 (M == SM_SentinelUndef))
10670 continue;
10672 MatchLo &= (M == Unpckl[i]);
10673 MatchHi &= (M == Unpckh[i]);
10676 if (MatchLo || MatchHi) {
10677 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10678 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10679 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10680 return true;
10681 }
10682 }
10684 // If a binary shuffle, commute and try again.
10685 if (!IsUnary) {
10686 ShuffleVectorSDNode::commuteMask(Unpckl);
10687 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10688 UnpackOpcode = X86ISD::UNPCKL;
10689 std::swap(V1, V2);
10690 return true;
10691 }
10693 ShuffleVectorSDNode::commuteMask(Unpckh);
10694 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10695 UnpackOpcode = X86ISD::UNPCKH;
10696 std::swap(V1, V2);
10697 return true;
10698 }
10699 }
10701 return false;
10702 }
10704 // X86 has dedicated unpack instructions that can handle specific blend
10705 // operations: UNPCKH and UNPCKL.
10706 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10707 ArrayRef<int> Mask, SDValue V1, SDValue V2,
10708 SelectionDAG &DAG) {
10709 SmallVector<int, 8> Unpckl;
10710 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10711 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10712 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10714 SmallVector<int, 8> Unpckh;
10715 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10716 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10717 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10719 // Commute and try again.
10720 ShuffleVectorSDNode::commuteMask(Unpckl);
10721 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10722 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10724 ShuffleVectorSDNode::commuteMask(Unpckh);
10725 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10726 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10728 return SDValue();
10729 }
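// Illustrative example: the v4f32 mask <0, 4, 1, 5> interleaves the low
// halves of V1 and V2 and lowers to a single UNPCKL (unpcklps).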
10731 static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10732 int Delta) {
10733 int Size = (int)Mask.size();
10734 int Split = Size / Delta;
10735 int TruncatedVectorStart = SwappedOps ? Size : 0;
10737 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10738 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10739 return false;
10741 // The rest of the mask should not refer to the truncated vector's elements.
10742 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10743 TruncatedVectorStart + Size))
10744 return false;
10746 return true;
10747 }
10749 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10751 // An example is the following:
10753 // t0: ch = EntryToken
10754 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10755 // t25: v4i32 = truncate t2
10756 // t41: v8i16 = bitcast t25
10757 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10758 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10759 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10760 // t18: v2i64 = bitcast t51
10762 // Without avx512vl, this is lowered to:
10764 // vpmovqd %zmm0, %ymm0
10765 // vpshufb {{.*#+}} xmm0 =
10766 // xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10768 // But when avx512vl is available, one can just use a single vpmovdw
10770 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10771 MVT VT, SDValue V1, SDValue V2,
10773 const X86Subtarget &Subtarget) {
10774 if (VT != MVT::v16i8 && VT != MVT::v8i16)
10775 return SDValue();
10777 if (Mask.size() != VT.getVectorNumElements())
10778 return SDValue();
10780 bool SwappedOps = false;
10782 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10783 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10784 return SDValue();
10786 std::swap(V1, V2);
10787 SwappedOps = true;
10788 }
10790 // Look for:
10791 //
10792 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10793 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10795 // and similar ones.
10796 if (V1.getOpcode() != ISD::BITCAST)
10797 return SDValue();
10798 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
10799 return SDValue();
10801 SDValue Src = V1.getOperand(0).getOperand(0);
10802 MVT SrcVT = Src.getSimpleValueType();
10804 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
10805 // are only available with avx512vl.
10806 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
10807 return SDValue();
10809 // Down Convert Word to Byte is only available with avx512bw. The case with
10810 // 256-bit output doesn't contain a shuffle and is therefore not handled here.
10811 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
10812 !Subtarget.hasBWI())
10813 return SDValue();
10815 // The first half/quarter of the mask should refer to every second/fourth
10816 // element of the vector truncated and bitcasted.
10817 if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
10818 !matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
10819 return SDValue();
10821 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
10822 }
10824 // X86 has dedicated pack instructions that can handle specific truncation
10825 // operations: PACKSS and PACKUS.
10826 static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
10827 SDValue &V2, unsigned &PackOpcode,
10828 ArrayRef<int> TargetMask,
10829 SelectionDAG &DAG,
10830 const X86Subtarget &Subtarget) {
10831 unsigned NumElts = VT.getVectorNumElements();
10832 unsigned BitSize = VT.getScalarSizeInBits();
10833 MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
10834 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
10836 auto MatchPACK = [&](SDValue N1, SDValue N2) {
10837 SDValue VV1 = DAG.getBitcast(PackVT, N1);
10838 SDValue VV2 = DAG.getBitcast(PackVT, N2);
10839 if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
10840 APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
10841 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
10842 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
10843 V1 = VV1;
10844 V2 = VV2;
10845 SrcVT = PackVT;
10846 PackOpcode = X86ISD::PACKUS;
10847 return true;
10848 }
10849 }
10850 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
10851 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
10855 PackOpcode = X86ISD::PACKSS;
10861 // Try binary shuffle.
10862 SmallVector<int, 32> BinaryMask;
10863 createPackShuffleMask(VT, BinaryMask, false);
10864 if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
10865 if (MatchPACK(V1, V2))
10868 // Try unary shuffle.
10869 SmallVector<int, 32> UnaryMask;
10870 createPackShuffleMask(VT, UnaryMask, true);
10871 if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
10872 if (MatchPACK(V1, V1))
10878 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10879 SDValue V1, SDValue V2, SelectionDAG &DAG,
10880 const X86Subtarget &Subtarget) {
10882 unsigned PackOpcode;
10883 if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10885 return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
10886 DAG.getBitcast(PackVT, V2));
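// Worked example (added sketch, not from the original source): a v16i8
// shuffle with mask <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30> takes the
// low byte of every i16 lane of the concatenated inputs, so it matches the
// binary pack mask; it becomes PACKUS(bitcast v8i16 V1, bitcast v8i16 V2)
// when each lane's high byte is known zero, or PACKSS when each lane has
// more than eight sign bits.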
10891 /// Try to emit a bitmask instruction for a shuffle.
10893 /// This handles cases where we can model a blend exactly as a bitmask due to
10894 /// one of the inputs being zeroable.
10895 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10896 SDValue V2, ArrayRef<int> Mask,
10897 const APInt &Zeroable,
10898 const X86Subtarget &Subtarget,
10899 SelectionDAG &DAG) {
10901 MVT EltVT = VT.getVectorElementType();
10902 SDValue Zero, AllOnes;
10903 // Use f64 if i64 isn't legal.
10904 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10906 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10910 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10911 Zero = DAG.getConstantFP(0.0, DL, EltVT);
10912 AllOnes = DAG.getConstantFP(
10913 APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
10915 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10917 Zero = DAG.getConstant(0, DL, EltVT);
10918 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10921 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10923 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10926 if (Mask[i] % Size != i)
10927 return SDValue(); // Not a blend.
10929 V = Mask[i] < Size ? V1 : V2;
10930 else if (V != (Mask[i] < Size ? V1 : V2))
10931 return SDValue(); // Can only let one input through the mask.
10933 VMaskOps[i] = AllOnes;
10936 return SDValue(); // No non-zeroable elements!
10938 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10939 VMask = DAG.getBitcast(LogicVT, VMask);
10940 V = DAG.getBitcast(LogicVT, V);
10941 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10942 return DAG.getBitcast(VT, And);
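// Illustrative case (added, not in the original): for v4i32 with
// Mask = <0, 1, -1, -1> and elements 2 and 3 zeroable, every live element
// already sits in place and comes from V1, so the whole shuffle becomes a
// single AND of V1 with the constant vector <-1, -1, 0, 0>.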
10945 /// Try to emit a blend instruction for a shuffle using bit math.
10947 /// This is used as a fallback approach when first-class blend instructions are
10948 /// unavailable. Currently it is only suitable for integer vectors, but could
10949 /// be generalized for floating point vectors if desirable.
10950 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10951 SDValue V2, ArrayRef<int> Mask,
10952 SelectionDAG &DAG) {
10953 assert(VT.isInteger() && "Only supports integer vector types!");
10954 MVT EltVT = VT.getVectorElementType();
10955 SDValue Zero = DAG.getConstant(0, DL, EltVT);
10956 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10957 SmallVector<SDValue, 16> MaskOps;
10958 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10959 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10960 return SDValue(); // Shuffled input!
10961 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10964 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10965 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
10966 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
10967 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
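// Illustrative case (added, not in the original): for v8i16 with
// Mask = <0, 9, 2, 11, 4, 13, 6, 15> no element moves, so the blend becomes
// (V1 & <-1,0,-1,0,-1,0,-1,0>) OR'd with ANDNP of that mask and V2: one
// PAND, one PANDN and one POR.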
10970 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10971 SDValue PreservedSrc,
10972 const X86Subtarget &Subtarget,
10973 SelectionDAG &DAG);
10975 static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
10976 MutableArrayRef<int> Mask,
10977 const APInt &Zeroable, bool &ForceV1Zero,
10978 bool &ForceV2Zero, uint64_t &BlendMask) {
10979 bool V1IsZeroOrUndef =
10980 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10981 bool V2IsZeroOrUndef =
10982 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10985 ForceV1Zero = false, ForceV2Zero = false;
10986 assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
10988 // Attempt to generate the binary blend mask. If an input is zero then
10989 // we can use any lane.
10990 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10992 if (M == SM_SentinelUndef)
10996 if (M == i + Size) {
10997 BlendMask |= 1ull << i;
11001 if (V1IsZeroOrUndef) {
11002 ForceV1Zero = true;
11006 if (V2IsZeroOrUndef) {
11007 ForceV2Zero = true;
11008 BlendMask |= 1ull << i;
11009 Mask[i] = i + Size;
11018 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11020 uint64_t ScaledMask = 0;
11021 for (int i = 0; i != Size; ++i)
11022 if (BlendMask & (1ull << i))
11023 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
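// For example (added note): BlendMask = 0b0101 with Size = 4 and Scale = 2
// widens each selected bit into a pair of bits, producing 0b00110011.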
11027 /// Try to emit a blend instruction for a shuffle.
11029 /// This doesn't do any checks for the availability of instructions for blending
11030 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11031 /// be matched in the backend with the type given. What it does check for is
11032 /// that the shuffle mask is a blend, or convertible into a blend with zero.
11033 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11034 SDValue V2, ArrayRef<int> Original,
11035 const APInt &Zeroable,
11036 const X86Subtarget &Subtarget,
11037 SelectionDAG &DAG) {
11038 uint64_t BlendMask = 0;
11039 bool ForceV1Zero = false, ForceV2Zero = false;
11040 SmallVector<int, 64> Mask(Original.begin(), Original.end());
11041 if (!matchVectorShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11045 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11047 V1 = getZeroVector(VT, Subtarget, DAG, DL);
11049 V2 = getZeroVector(VT, Subtarget, DAG, DL);
11051 switch (VT.SimpleTy) {
11054 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11058 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11065 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11066 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11067 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11068 case MVT::v16i16: {
11069 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11070 SmallVector<int, 8> RepeatedMask;
11071 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11072 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11073 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11075 for (int i = 0; i < 8; ++i)
11076 if (RepeatedMask[i] >= 8)
11077 BlendMask |= 1ull << i;
11078 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11079 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11081 // Use PBLENDW for lower/upper lanes and then blend lanes.
11082 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11083 // merge to VSELECT where useful.
11084 uint64_t LoMask = BlendMask & 0xFF;
11085 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11086 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11087 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11088 DAG.getTargetConstant(LoMask, DL, MVT::i8));
11089 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11090 DAG.getTargetConstant(HiMask, DL, MVT::i8));
11091 return DAG.getVectorShuffle(
11092 MVT::v16i16, DL, Lo, Hi,
11093 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11098 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11101 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11103 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11104 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11108 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11110 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11111 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11112 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11115 // Scale the blend by the number of bytes per element.
11116 int Scale = VT.getScalarSizeInBits() / 8;
11118 // This form of blend is always done on bytes. Compute the byte vector type.
11120 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11122 // x86 allows load folding with blendvb from the 2nd source operand. But
11123 // we are still using LLVM select here (see comment below), so that's V1.
11124 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11125 // allow that load-folding possibility.
11126 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11127 ShuffleVectorSDNode::commuteMask(Mask);
11131 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11132 // mix of LLVM's code generator and the x86 backend. We tell the code
11133 // generator that boolean values in the elements of an x86 vector register
11134 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11135 // mapping a select to operand #1, and 'false' mapping to operand #2. The
11136 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11137 // of the element (the remaining are ignored) and 0 in that high bit would
11138 // mean operand #1 while 1 in the high bit would mean operand #2. So while
11139 // the LLVM model for boolean values in vector elements gets the relevant
11140 // bit set, it is set backwards and over-constrained relative to x86's actual model.
11142 SmallVector<SDValue, 32> VSELECTMask;
11143 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11144 for (int j = 0; j < Scale; ++j)
11145 VSELECTMask.push_back(
11146 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11147 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11150 V1 = DAG.getBitcast(BlendVT, V1);
11151 V2 = DAG.getBitcast(BlendVT, V2);
11152 return DAG.getBitcast(
11154 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11163 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
11164 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
11166 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11171 // Otherwise load an immediate into a GPR, cast to k-register, and use a masked move.
11174 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11175 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11176 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11179 llvm_unreachable("Not a supported integer vector type!");
11183 /// Try to lower as a blend of elements from two inputs followed by
11184 /// a single-input permutation.
11186 /// This matches the pattern where we can blend elements from two inputs and
11187 /// then reduce the shuffle to a single-input permutation.
11188 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11189 SDValue V1, SDValue V2,
11190 ArrayRef<int> Mask,
11192 bool ImmBlends = false) {
11193 // We build up the blend mask while checking whether a blend is a viable way
11194 // to reduce the shuffle.
11195 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11196 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11198 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11202 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11204 if (BlendMask[Mask[i] % Size] < 0)
11205 BlendMask[Mask[i] % Size] = Mask[i];
11206 else if (BlendMask[Mask[i] % Size] != Mask[i])
11207 return SDValue(); // Can't blend in the needed input!
11209 PermuteMask[i] = Mask[i] % Size;
11212 // If only immediate blends, then bail if the blend mask can't be widened to i16.
11214 unsigned EltSize = VT.getScalarSizeInBits();
11215 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11218 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11219 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
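// Worked example (added sketch): for v4i32 with Mask = <5, 0, 7, 2> the loop
// above builds BlendMask = <0, 5, 2, 7>, a legal blend keeping every element
// in its home slot, and then PermuteMask = <1, 0, 3, 2> swaps the pairs into
// the requested order.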
11222 /// Try to lower as an unpack of elements from two inputs followed by
11223 /// a single-input permutation.
11225 /// This matches the pattern where we can unpack elements from two inputs and
11226 /// then reduce the shuffle to a single-input (wider) permutation.
11227 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11228 SDValue V1, SDValue V2,
11229 ArrayRef<int> Mask,
11230 SelectionDAG &DAG) {
11231 int NumElts = Mask.size();
11232 int NumLanes = VT.getSizeInBits() / 128;
11233 int NumLaneElts = NumElts / NumLanes;
11234 int NumHalfLaneElts = NumLaneElts / 2;
11236 bool MatchLo = true, MatchHi = true;
11237 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11239 // Determine UNPCKL/UNPCKH type and operand order.
11240 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11241 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11242 int M = Mask[Lane + Elt];
11246 SDValue &Op = Ops[Elt & 1];
11247 if (M < NumElts && (Op.isUndef() || Op == V1))
11249 else if (NumElts <= M && (Op.isUndef() || Op == V2))
11254 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11255 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11256 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11257 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11258 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11259 if (!MatchLo && !MatchHi)
11263 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11265 // Now check that each pair of elts comes from the same unpack pair
11266 // and set the permute mask based on each pair.
11267 // TODO - Investigate cases where we permute individual elements.
11268 SmallVector<int, 32> PermuteMask(NumElts, -1);
11269 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11270 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11271 int M0 = Mask[Lane + Elt + 0];
11272 int M1 = Mask[Lane + Elt + 1];
11273 if (0 <= M0 && 0 <= M1 &&
11274 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11277 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11279 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11283 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11284 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11285 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
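// Worked example (added sketch): for v4i32 with Mask = <1, 5, 0, 4> every
// element comes from the low halves, so we form UNPCKL(V1, V2) = <0, 4, 1, 5>
// and permute it with <2, 3, 0, 1> to recover the requested order.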
11288 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11289 /// permuting the elements of the result in place.
11290 static SDValue lowerShuffleAsByteRotateAndPermute(
11291 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11292 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11293 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11294 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11295 (VT.is512BitVector() && !Subtarget.hasBWI()))
11298 // We don't currently support lane crossing permutes.
11299 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11302 int Scale = VT.getScalarSizeInBits() / 8;
11303 int NumLanes = VT.getSizeInBits() / 128;
11304 int NumElts = VT.getVectorNumElements();
11305 int NumEltsPerLane = NumElts / NumLanes;
11307 // Determine range of mask elts.
11308 bool Blend1 = true;
11309 bool Blend2 = true;
11310 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11311 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11312 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11313 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11314 int M = Mask[Lane + Elt];
11318 Blend1 &= (M == (Lane + Elt));
11319 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11320 M = M % NumEltsPerLane;
11321 Range1.first = std::min(Range1.first, M);
11322 Range1.second = std::max(Range1.second, M);
11325 Blend2 &= (M == (Lane + Elt));
11326 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11327 M = M % NumEltsPerLane;
11328 Range2.first = std::min(Range2.first, M);
11329 Range2.second = std::max(Range2.second, M);
11334 // Bail if we don't need both elements.
11335 // TODO - it might be worth doing this for unary shuffles if the permute is cheap enough.
11337 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11338 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11341 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11344 // Rotate the 2 ops so we can access both ranges, then permute the result.
11345 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11346 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11347 SDValue Rotate = DAG.getBitcast(
11348 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11349 DAG.getBitcast(ByteVT, Lo),
11350 DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11351 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11352 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11353 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11354 int M = Mask[Lane + Elt];
11358 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11360 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11363 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11366 // Check if the ranges are small enough to rotate from either direction.
11367 if (Range2.second < Range1.first)
11368 return RotateAndPermute(V1, V2, Range1.first, 0);
11369 if (Range1.second < Range2.first)
11370 return RotateAndPermute(V2, V1, Range2.first, NumElts);
11374 /// Generic routine to decompose a shuffle and blend into independent
11375 /// blends and permutes.
11377 /// This matches the extremely common pattern for handling combined
11378 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11379 /// operations. It will try to pick the best arrangement of shuffles and blends.
11381 static SDValue lowerShuffleAsDecomposedShuffleBlend(
11382 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11383 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11384 // Shuffle the input elements into the desired positions in V1 and V2 and
11385 // blend them together.
11386 SmallVector<int, 32> V1Mask(Mask.size(), -1);
11387 SmallVector<int, 32> V2Mask(Mask.size(), -1);
11388 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11389 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11390 if (Mask[i] >= 0 && Mask[i] < Size) {
11391 V1Mask[i] = Mask[i];
11393 } else if (Mask[i] >= Size) {
11394 V2Mask[i] = Mask[i] - Size;
11395 BlendMask[i] = i + Size;
11398 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11399 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11400 // the shuffle may be able to fold with a load or other benefit. However, when
11401 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11402 // pre-shuffle first is a better strategy.
11403 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11404 // Only prefer immediate blends to unpack/rotate.
11405 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11408 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11411 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11412 DL, VT, V1, V2, Mask, Subtarget, DAG))
11414 // Unpack/rotate failed - try again with variable blends.
11415 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11420 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11421 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11422 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
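// Worked example (added sketch): for v4i32 with Mask = <2, 6, 1, 5> the
// decomposition is V1Mask = <2, -1, 1, -1>, V2Mask = <-1, 2, -1, 1> and
// BlendMask = <0, 5, 2, 7>: two single-input permutes followed by one blend.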
11425 /// Try to lower a vector shuffle as a rotation.
11427 /// This is used to support PALIGNR for SSSE3 and VALIGND/Q for AVX512.
11428 static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11429 int NumElts = Mask.size();
11431 // We need to detect various ways of spelling a rotation:
11432 // [11, 12, 13, 14, 15, 0, 1, 2]
11433 // [-1, 12, 13, 14, -1, -1, 1, -1]
11434 // [-1, -1, -1, -1, -1, -1, 1, 2]
11435 // [ 3, 4, 5, 6, 7, 8, 9, 10]
11436 // [-1, 4, 5, 6, -1, -1, 9, -1]
11437 // [-1, 4, 5, 6, -1, -1, -1, -1]
11440 for (int i = 0; i < NumElts; ++i) {
11442 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11443 "Unexpected mask index.");
11447 // Determine where a rotated vector would have started.
11448 int StartIdx = i - (M % NumElts);
11450 // The identity rotation isn't interesting, stop.
11453 // If we found the tail of a vector the rotation must be the missing
11454 // front. If we found the head of a vector, the rotation is how many of its leading elements remain at the end.
11456 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11459 Rotation = CandidateRotation;
11460 else if (Rotation != CandidateRotation)
11461 // The rotations don't match, so we can't match this mask.
11464 // Compute which value this mask is pointing at.
11465 SDValue MaskV = M < NumElts ? V1 : V2;
11467 // Compute which of the two target values this index should be assigned
11468 // to. This reflects whether the high elements are remaining or the low
11469 // elements are remaining.
11470 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11472 // Either set up this value if we've not encountered it before, or check
11473 // that it remains consistent.
11476 else if (TargetV != MaskV)
11477 // This may be a rotation, but it pulls from the inputs in some
11478 // unsupported interleaving.
11482 // Check that we successfully analyzed the mask, and normalize the results.
11483 assert(Rotation != 0 && "Failed to locate a viable rotation!");
11484 assert((Lo || Hi) && "Failed to find a rotated input vector!");
11496 /// Try to lower a vector shuffle as a byte rotation.
11498 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11499 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11500 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11501 /// try to generically lower a vector shuffle through such a pattern. It
11502 /// does not check for the profitability of lowering either as PALIGNR or
11503 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11504 /// This matches shuffle vectors that look like:
11506 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11508 /// Essentially it concatenates V1 and V2, shifts right by some number of
11509 /// elements, and takes the low elements as the result. Note that while this is
11510 /// specified as a *right shift* because x86 is little-endian, it is a *left
11511 /// rotate* of the vector lanes.
11512 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11513 ArrayRef<int> Mask) {
11514 // Don't accept any shuffles with zero elements.
11515 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11518 // PALIGNR works on 128-bit lanes.
11519 SmallVector<int, 16> RepeatedMask;
11520 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11523 int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11527 // PALIGNR rotates bytes, so we need to scale the
11528 // rotation based on how many bytes are in the vector lane.
11529 int NumElts = RepeatedMask.size();
11530 int Scale = 16 / NumElts;
11531 return Rotation * Scale;
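// For example (added note): a v8i16 shuffle whose repeated 128-bit lane mask
// rotates by 3 elements scales to a byte rotation of 3 * (16 / 8) = 6.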
11534 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11535 SDValue V2, ArrayRef<int> Mask,
11536 const X86Subtarget &Subtarget,
11537 SelectionDAG &DAG) {
11538 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11540 SDValue Lo = V1, Hi = V2;
11541 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11542 if (ByteRotation <= 0)
11545 // Cast the inputs to an i8 vector of the correct length to match PALIGNR or PSLLDQ/PSRLDQ.
11547 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11548 Lo = DAG.getBitcast(ByteVT, Lo);
11549 Hi = DAG.getBitcast(ByteVT, Hi);
11551 // SSSE3 targets can use the palignr instruction.
11552 if (Subtarget.hasSSSE3()) {
11553 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11554 "512-bit PALIGNR requires BWI instructions");
11555 return DAG.getBitcast(
11556 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11557 DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11560 assert(VT.is128BitVector() &&
11561 "Rotate-based lowering only supports 128-bit lowering!");
11562 assert(Mask.size() <= 16 &&
11563 "Can shuffle at most 16 bytes in a 128-bit vector!");
11564 assert(ByteVT == MVT::v16i8 &&
11565 "SSE2 rotate lowering only needed for v16i8!");
11567 // Default SSE2 implementation
11568 int LoByteShift = 16 - ByteRotation;
11569 int HiByteShift = ByteRotation;
11572 DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11573 DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11575 DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11576 DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11577 return DAG.getBitcast(VT,
11578 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
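// For example (added note): a byte rotation of 5 on plain SSE2 emits
// PSLLDQ $11 on Lo and PSRLDQ $5 on Hi, then ORs the two shifted halves
// together.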
11581 /// Try to lower a vector shuffle as a dword/qword rotation.
11583 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11584 /// rotation of the concatenation of two vectors; this routine will
11585 /// try to generically lower a vector shuffle through such a pattern.
11587 /// Essentially it concatenates V1 and V2, shifts right by some number of
11588 /// elements, and takes the low elements as the result. Note that while this is
11589 /// specified as a *right shift* because x86 is little-endian, it is a *left
11590 /// rotate* of the vector lanes.
11591 static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11592 SDValue V2, ArrayRef<int> Mask,
11593 const X86Subtarget &Subtarget,
11594 SelectionDAG &DAG) {
11595 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11596 "Only 32-bit and 64-bit elements are supported!");
11598 // 128/256-bit vectors are only supported with VLX.
11599 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11600 && "VLX required for 128/256-bit vectors");
11602 SDValue Lo = V1, Hi = V2;
11603 int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11607 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11608 DAG.getTargetConstant(Rotation, DL, MVT::i8));
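// Worked example (added sketch): a v8i32 shuffle with mask
// <3, 4, 5, 6, 7, 8, 9, 10> matches a rotation of 3 and lowers to a single
// VALIGND with immediate 3 (given AVX512VL for the 256-bit type).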
11611 /// Try to lower a vector shuffle as a byte shift sequence.
11612 static SDValue lowerVectorShuffleAsByteShiftMask(
11613 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11614 const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11615 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11616 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11618 // We need a shuffle that has zeros at one/both ends and a sequential
11619 // shuffle from one source within.
11620 unsigned ZeroLo = Zeroable.countTrailingOnes();
11621 unsigned ZeroHi = Zeroable.countLeadingOnes();
11622 if (!ZeroLo && !ZeroHi)
11625 unsigned NumElts = Mask.size();
11626 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11627 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11630 unsigned Scale = VT.getScalarSizeInBits() / 8;
11631 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11632 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11633 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11636 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11637 Res = DAG.getBitcast(MVT::v16i8, Res);
11639 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11640 // inner sequential set of elements, possibly offset:
11641 // 01234567 --> zzzzzz01 --> 1zzzzzzz
11642 // 01234567 --> 4567zzzz --> zzzzz456
11643 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11645 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11646 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11647 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11648 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11649 DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11650 } else if (ZeroHi == 0) {
11651 unsigned Shift = Mask[ZeroLo] % NumElts;
11652 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11653 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11654 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11655 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11656 } else if (!Subtarget.hasSSSE3()) {
11657 // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11658 // by performing 3 byte shifts. Shuffle combining can kick in above that.
11659 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11660 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11661 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11662 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11663 Shift += Mask[ZeroLo] % NumElts;
11664 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11665 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11666 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11667 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11671 return DAG.getBitcast(VT, Res);
11674 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11676 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11677 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11678 /// matches elements from one of the input vectors shuffled to the left or
11679 /// right with zeroable elements 'shifted in'. It handles both the strictly
11680 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11681 /// quad word lane.
11683 /// PSHL : (little-endian) left bit shift.
11684 /// [ zz, 0, zz, 2 ]
11685 /// [ -1, 4, zz, -1 ]
11686 /// PSRL : (little-endian) right bit shift.
11687 /// [ 1, zz, 3, zz]
11688 /// [ -1, -1, 7, zz]
11689 /// PSLLDQ : (little-endian) left byte shift
11690 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
11691 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
11692 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
11693 /// PSRLDQ : (little-endian) right byte shift
11694 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
11695 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
11696 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
11697 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11698 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11699 int MaskOffset, const APInt &Zeroable,
11700 const X86Subtarget &Subtarget) {
11701 int Size = Mask.size();
11702 unsigned SizeInBits = Size * ScalarSizeInBits;
11704 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11705 for (int i = 0; i < Size; i += Scale)
11706 for (int j = 0; j < Shift; ++j)
11707 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11713 auto MatchShift = [&](int Shift, int Scale, bool Left) {
11714 for (int i = 0; i != Size; i += Scale) {
11715 unsigned Pos = Left ? i + Shift : i;
11716 unsigned Low = Left ? i : i + Shift;
11717 unsigned Len = Scale - Shift;
11718 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11722 int ShiftEltBits = ScalarSizeInBits * Scale;
11723 bool ByteShift = ShiftEltBits > 64;
11724 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11725 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11726 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11728 // Normalize the scale for byte shifts to still produce an i64 element type.
11730 Scale = ByteShift ? Scale / 2 : Scale;
11732 // We need to round trip through the appropriate type for the shift.
11733 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11734 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11735 : MVT::getVectorVT(ShiftSVT, Size / Scale);
11736 return (int)ShiftAmt;
11739 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11740 // keep doubling the size of the integer elements up to that. We can
11741 // then shift the elements of the integer vector by whole multiples of
11742 // their width within the elements of the larger integer vector. Test each
11743 // multiple to see if we can find a match with the moved element indices
11744 // and that the shifted in elements are all zeroable.
11745 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11746 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11747 for (int Shift = 1; Shift != Scale; ++Shift)
11748 for (bool Left : {true, false})
11749 if (CheckZeros(Shift, Scale, Left)) {
11750 int ShiftAmt = MatchShift(Shift, Scale, Left);
11759 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11760 SDValue V2, ArrayRef<int> Mask,
11761 const APInt &Zeroable,
11762 const X86Subtarget &Subtarget,
11763 SelectionDAG &DAG) {
11764 int Size = Mask.size();
11765 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11771 // Try to match shuffle against V1 shift.
11772 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11773 Mask, 0, Zeroable, Subtarget);
11775 // If V1 failed, try to match shuffle against V2 shift.
11776 if (ShiftAmt < 0) {
11777 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11778 Mask, Size, Zeroable, Subtarget);
11785 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11786 "Illegal integer vector type");
11787 V = DAG.getBitcast(ShiftVT, V);
11788 V = DAG.getNode(Opcode, DL, ShiftVT, V,
11789 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11790 return DAG.getBitcast(VT, V);
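// Worked example (added sketch): for v4i32 with Mask = <4, 0, 1, 2> where V2
// is all zeros (so element 0 of the result is zeroable), the matcher scales
// up to a single 128-bit element and returns a left byte shift of 4, i.e.
// one PSLLDQ $4.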
11793 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11794 // Remainder of lower half result is zero and upper half is all undef.
11795 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11796 ArrayRef<int> Mask, uint64_t &BitLen,
11797 uint64_t &BitIdx, const APInt &Zeroable) {
11798 int Size = Mask.size();
11799 int HalfSize = Size / 2;
11800 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11801 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
11803 // Upper half must be undefined.
11804 if (!isUndefUpperHalf(Mask))
11807 // Determine the extraction length from the part of the
11808 // lower half that isn't zeroable.
11809 int Len = HalfSize;
11810 for (; Len > 0; --Len)
11811 if (!Zeroable[Len - 1])
11813 assert(Len > 0 && "Zeroable shuffle mask");
11815 // Attempt to match first Len sequential elements from the lower half.
11818 for (int i = 0; i != Len; ++i) {
11820 if (M == SM_SentinelUndef)
11822 SDValue &V = (M < Size ? V1 : V2);
11825 // The extracted elements must start at a valid index and all mask
11826 // elements must be in the lower half.
11827 if (i > M || M >= HalfSize)
11830 if (Idx < 0 || (Src == V && Idx == (M - i))) {
11838 if (!Src || Idx < 0)
11841 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11842 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11843 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
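// For example (added note): a v8i16 mask <2, 3, zz, zz, undef x 4> matches
// Len = 2 elements starting at Idx = 2, giving BitLen = 32 and BitIdx = 32
// for the EXTRQI immediate operands.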
11848 // INSERTQ: Extract lowest Len elements from lower half of second source and
11849 // insert over first source, starting at Idx.
11850 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
11851 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11852 ArrayRef<int> Mask, uint64_t &BitLen,
11853 uint64_t &BitIdx) {
11854 int Size = Mask.size();
11855 int HalfSize = Size / 2;
11856 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11858 // Upper half must be undefined.
11859 if (!isUndefUpperHalf(Mask))
11862 for (int Idx = 0; Idx != HalfSize; ++Idx) {
11865 // Attempt to match first source from mask before insertion point.
11866 if (isUndefInRange(Mask, 0, Idx)) {
11868 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11870 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11876 // Extend the extraction length looking to match both the insertion of
11877 // the second source and the remaining elements of the first.
11878 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11880 int Len = Hi - Idx;
11882 // Match insertion.
11883 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11885 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11891 // Match the remaining elements of the lower half.
11892 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11894 } else if ((!Base || (Base == V1)) &&
11895 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11897 } else if ((!Base || (Base == V2)) &&
11898 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11905 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11906 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11916 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11917 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11918 SDValue V2, ArrayRef<int> Mask,
11919 const APInt &Zeroable, SelectionDAG &DAG) {
11920 uint64_t BitLen, BitIdx;
11921 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11922 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11923 DAG.getTargetConstant(BitLen, DL, MVT::i8),
11924 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11926 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11927 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11928 V2 ? V2 : DAG.getUNDEF(VT),
11929 DAG.getTargetConstant(BitLen, DL, MVT::i8),
11930 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11935 /// Lower a vector shuffle as a zero or any extension.
11937 /// Given a specific number of elements, element bit width, and extension
11938 /// stride, produce either a zero or any extension based on the available
11939 /// features of the subtarget. The extended elements are consecutive and
11940 /// can begin at an offset element index in the input; to avoid excess
11941 /// shuffling, the offset must either be in the bottom lane or at the start
11942 /// of a higher lane. All extended elements must come from the same lane.
11944 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11945 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11946 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11947 assert(Scale > 1 && "Need a scale to extend.");
11948 int EltBits = VT.getScalarSizeInBits();
11949 int NumElements = VT.getVectorNumElements();
11950 int NumEltsPerLane = 128 / EltBits;
11951 int OffsetLane = Offset / NumEltsPerLane;
11952 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11953 "Only 8, 16, and 32 bit elements can be extended.");
11954 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11955 assert(0 <= Offset && "Extension offset must be positive.");
11956 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11957 "Extension offset must be in the first lane or start an upper lane.");
11959 // Check that an index is in same lane as the base offset.
11960 auto SafeOffset = [&](int Idx) {
11961 return OffsetLane == (Idx / NumEltsPerLane);
11964 // Shift along an input so that the offset base moves to the first element.
11965 auto ShuffleOffset = [&](SDValue V) {
11969 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11970 for (int i = 0; i * Scale < NumElements; ++i) {
11971 int SrcIdx = i + Offset;
11972 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11974 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11977 // Found a valid a/zext mask! Try various lowering strategies based on the
11978 // input type and available ISA extensions.
11979 if (Subtarget.hasSSE41()) {
11980 // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
11981 // PUNPCK will catch this in a later shuffle match.
11982 if (Offset && Scale == 2 && VT.is128BitVector())
11984 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11985 NumElements / Scale);
11986 InputV = ShuffleOffset(InputV);
11987 InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
11988 ExtVT, InputV, DAG);
11989 return DAG.getBitcast(VT, InputV);
11992 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11994 // For any extends we can cheat for larger element sizes and use shuffle
11995 // instructions that can fold with a load and/or copy.
11996 if (AnyExt && EltBits == 32) {
11997 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11999 return DAG.getBitcast(
12000 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12001 DAG.getBitcast(MVT::v4i32, InputV),
12002 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
12004 if (AnyExt && EltBits == 16 && Scale > 2) {
12005 int PSHUFDMask[4] = {Offset / 2, -1,
12006 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
12007 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12008 DAG.getBitcast(MVT::v4i32, InputV),
12009 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
12010 int PSHUFWMask[4] = {1, -1, -1, -1};
12011 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
12012 return DAG.getBitcast(
12013 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
12014 DAG.getBitcast(MVT::v8i16, InputV),
12015 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
12018 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes to 64 bits.
12020 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
12021 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
12022 assert(VT.is128BitVector() && "Unexpected vector width!");
12024 int LoIdx = Offset * EltBits;
12025 SDValue Lo = DAG.getBitcast(
12026 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12027 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12028 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
12030 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
12031 return DAG.getBitcast(VT, Lo);
12033 int HiIdx = (Offset + 1) * EltBits;
12034 SDValue Hi = DAG.getBitcast(
12035 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12036 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12037 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
12038 return DAG.getBitcast(VT,
12039 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
12042 // If this would require more than 2 unpack instructions to expand, use
12043 // pshufb when available. We can only use more than 2 unpack instructions
12044 // when zero extending i8 elements which also makes it easier to use pshufb.
12045 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
12046 assert(NumElements == 16 && "Unexpected byte vector width!");
12047 SDValue PSHUFBMask[16];
12048 for (int i = 0; i < 16; ++i) {
12049 int Idx = Offset + (i / Scale);
12050 if ((i % Scale == 0 && SafeOffset(Idx))) {
12051 PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
12055 AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
12057 InputV = DAG.getBitcast(MVT::v16i8, InputV);
12058 return DAG.getBitcast(
12059 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
12060 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
12063 // If we are extending from an offset, ensure we start on a boundary that
12064 // we can unpack from.
12065 int AlignToUnpack = Offset % (NumElements / Scale);
12066 if (AlignToUnpack) {
12067 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12068 for (int i = AlignToUnpack; i < NumElements; ++i)
12069 ShMask[i - AlignToUnpack] = i;
12070 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
12071 Offset -= AlignToUnpack;
12074 // Otherwise emit a sequence of unpacks.
12076 unsigned UnpackLoHi = X86ISD::UNPCKL;
12077 if (Offset >= (NumElements / 2)) {
12078 UnpackLoHi = X86ISD::UNPCKH;
12079 Offset -= (NumElements / 2);
12082 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
12083 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
12084 : getZeroVector(InputVT, Subtarget, DAG, DL);
12085 InputV = DAG.getBitcast(InputVT, InputV);
12086 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
12090 } while (Scale > 1);
12091 return DAG.getBitcast(VT, InputV);
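// Worked example (added sketch): zero-extending v16i8 with Scale = 4 and
// Offset = 0, i.e. mask <0, zz, zz, zz, 1, zz, zz, zz, ...>, becomes a
// single PMOVZXBD on SSE4.1; without it the result is built from unpacks
// against a zero vector, or PSHUFB with SSSE3 once Scale exceeds 4.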
12094 /// Try to lower a vector shuffle as a zero extension on any microarch.
12096 /// This routine will try to do everything in its power to cleverly lower
12097 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
12098 /// check for the profitability of this lowering, it tries to aggressively
12099 /// match this pattern. It will use all of the micro-architectural details it
12100 /// can to emit an efficient lowering. It handles both blends with all-zero
12101 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
12102 /// masking out later).
12104 /// The reason we have dedicated lowering for zext-style shuffles is that they
12105 /// are both incredibly common and often quite performance sensitive.
12106 static SDValue lowerShuffleAsZeroOrAnyExtend(
12107 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12108 const APInt &Zeroable, const X86Subtarget &Subtarget,
12109 SelectionDAG &DAG) {
12110 int Bits = VT.getSizeInBits();
12111 int NumLanes = Bits / 128;
12112 int NumElements = VT.getVectorNumElements();
12113 int NumEltsPerLane = NumElements / NumLanes;
12114 assert(VT.getScalarSizeInBits() <= 32 &&
12115 "Exceeds 32-bit integer zero extension limit");
12116 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
12118 // Define a helper function to check a particular ext-scale and lower to it if valid.
12120 auto Lower = [&](int Scale) -> SDValue {
12122 bool AnyExt = true;
12125 for (int i = 0; i < NumElements; ++i) {
12128 continue; // Valid anywhere but doesn't tell us anything.
12129 if (i % Scale != 0) {
12130 // Each of the extended elements needs to be zeroable.
12134 // We no longer are in the anyext case.
12139 // The base elements need to be consecutive indices into the
12140 // same input vector.
12141 SDValue V = M < NumElements ? V1 : V2;
12142 M = M % NumElements;
12145 Offset = M - (i / Scale);
12146 } else if (InputV != V)
12147 return SDValue(); // Flip-flopping inputs.
12149 // Offset must start in the lowest 128-bit lane or at the start of an upper lane.
12151 // FIXME: Is it ever worth allowing a negative base offset?
12152 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
12153 (Offset % NumEltsPerLane) == 0))
12156 // If we are offsetting, all referenced entries must come from the same lane.
12158 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
12161 if ((M % NumElements) != (Offset + (i / Scale)))
12162 return SDValue(); // Non-consecutive strided elements.
12166 // If we fail to find an input, we have a zero-shuffle which should always
12167 // have already been handled.
12168 // FIXME: Maybe handle this here in case during blending we end up with one?
12172 // If we are offsetting, don't extend if we only match a single input, we
12173 // can always do better by using a basic PSHUF or PUNPCK.
12174 if (Offset != 0 && Matches < 2)
12177 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
12178 InputV, Mask, Subtarget, DAG);
12181 // The widest scale possible for extending is to a 64-bit integer.
12182 assert(Bits % 64 == 0 &&
12183 "The number of bits in a vector must be divisible by 64 on x86!");
12184 int NumExtElements = Bits / 64;
12186 // Each iteration, try extending the elements half as much, but into twice as many.
12188 for (; NumExtElements < NumElements; NumExtElements *= 2) {
12189 assert(NumElements % NumExtElements == 0 &&
12190 "The input vector size must be divisible by the extended size.");
12191 if (SDValue V = Lower(NumElements / NumExtElements))
12195 // General extends failed, but 128-bit vectors may be able to use MOVQ.
12199 // Returns one of the source operands if the shuffle can be reduced to a
12200 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12201 auto CanZExtLowHalf = [&]() {
12202 for (int i = NumElements / 2; i != NumElements; ++i)
12205 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12207 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12212 if (SDValue V = CanZExtLowHalf()) {
12213 V = DAG.getBitcast(MVT::v2i64, V);
12214 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12215 return DAG.getBitcast(VT, V);
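// For example (added note): a v4i32 shuffle <0, 1, zz, zz> keeps the low
// 64 bits of V1 and zeroes the rest, which is exactly MOVQ
// (VZEXT_MOVL on v2i64).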
12218 // No viable ext lowering found.
12222 /// Try to get a scalar value for a specific element of a vector.
12224 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12225 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12226 SelectionDAG &DAG) {
12227 MVT VT = V.getSimpleValueType();
12228 MVT EltVT = VT.getVectorElementType();
12229 V = peekThroughBitcasts(V);
12231 // If the bitcasts shift the element size, we can't extract an equivalent
12232 // element from it.
12233 MVT NewVT = V.getSimpleValueType();
12234 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12237 if (V.getOpcode() == ISD::BUILD_VECTOR ||
12238 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12239 // Ensure the scalar operand is the same size as the destination.
12240 // FIXME: Add support for scalar truncation where possible.
12241 SDValue S = V.getOperand(Idx);
12242 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12243 return DAG.getBitcast(EltVT, S);
12249 /// Helper to test for a load that can be folded with x86 shuffles.
12251 /// This is particularly important because the set of instructions varies
12252 /// significantly based on whether the operand is a load or not.
12253 static bool isShuffleFoldableLoad(SDValue V) {
12254 V = peekThroughBitcasts(V);
12255 return ISD::isNON_EXTLoad(V.getNode());
12258 /// Try to lower insertion of a single element into a zero vector.
12260 /// This is a common pattern that we have especially efficient ways to lower
12261 /// across all subtarget feature sets.
12262 static SDValue lowerShuffleAsElementInsertion(
12263 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12264 const APInt &Zeroable, const X86Subtarget &Subtarget,
12265 SelectionDAG &DAG) {
12267 MVT EltVT = VT.getVectorElementType();
12270 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12272 bool IsV1Zeroable = true;
12273 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12274 if (i != V2Index && !Zeroable[i]) {
12275 IsV1Zeroable = false;
12279 // Check for a single input from a SCALAR_TO_VECTOR node.
12280 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12281 // all the smarts here sunk into that routine. However, the current
12282 // lowering of BUILD_VECTOR makes that nearly impossible until the old
12283 // vector shuffle lowering is dead.
12284 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12286 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12287 // We need to zext the scalar if it is smaller than an i32.
12288 V2S = DAG.getBitcast(EltVT, V2S);
12289 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
12290 // Using zext to expand a narrow element won't work for non-zero insertions.
12295 // Zero-extend directly to i32.
12296 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12297 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12299 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12300 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12301 EltVT == MVT::i16) {
12302 // Either not inserting from the low element of the input or the input
12303 // element size is too small to use VZEXT_MOVL to clear the high bits.
12307 if (!IsV1Zeroable) {
12308 // If V1 can't be treated as a zero vector we have fewer options to lower
12309 // this. We can't support integer vectors or non-zero targets cheaply, and
12310 // the V1 elements can't be permuted in any way.
12311 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12312 if (!VT.isFloatingPoint() || V2Index != 0)
12314 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12315 V1Mask[V2Index] = -1;
12316 if (!isNoopShuffleMask(V1Mask))
12318 if (!VT.is128BitVector())
12321 // Otherwise, use MOVSD or MOVSS.
12322 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12323 "Only two types of floating point element types to handle!");
12324 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12328 // This lowering only works for the low element with floating point vectors.
12329 if (VT.isFloatingPoint() && V2Index != 0)
12332 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12334 V2 = DAG.getBitcast(VT, V2);
12336 if (V2Index != 0) {
12337 // If we have 4 or fewer lanes we can cheaply shuffle the element into
12338 // the desired position. Otherwise it is more efficient to do a vector
12339 // shift left. We know that we can do a vector shift left because all
12340 // the inputs are zero.
12341 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12342 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12343 V2Shuffle[V2Index] = 0;
12344 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12346 V2 = DAG.getBitcast(MVT::v16i8, V2);
12347 V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12348 DAG.getTargetConstant(
12349 V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12350 V2 = DAG.getBitcast(VT, V2);
12356 /// Try to lower broadcast of a single - truncated - integer element,
12357 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12359 /// This assumes we have AVX2.
12360 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12362 const X86Subtarget &Subtarget,
12363 SelectionDAG &DAG) {
12364 assert(Subtarget.hasAVX2() &&
12365 "We can only lower integer broadcasts with AVX2!");
12367 EVT EltVT = VT.getVectorElementType();
12368 EVT V0VT = V0.getValueType();
12370 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12371 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12373 EVT V0EltVT = V0VT.getVectorElementType();
12374 if (!V0EltVT.isInteger())
12377 const unsigned EltSize = EltVT.getSizeInBits();
12378 const unsigned V0EltSize = V0EltVT.getSizeInBits();
12380 // This is only a truncation if the original element type is larger.
12381 if (V0EltSize <= EltSize)
12384 assert(((V0EltSize % EltSize) == 0) &&
12385 "Scalar type sizes must all be powers of 2 on x86!");
12387 const unsigned V0Opc = V0.getOpcode();
12388 const unsigned Scale = V0EltSize / EltSize;
12389 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12391 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12392 V0Opc != ISD::BUILD_VECTOR)
12395 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12397 // If we're extracting non-least-significant bits, shift so we can truncate.
12398 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12399 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12400 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12401 if (const int OffsetIdx = BroadcastIdx % Scale)
12402 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12403 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12405 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12406 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
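// Worked example (added sketch): broadcasting byte 5 of a v4i32 build_vector
// has Scale = 4, so we take scalar operand 1, shift it right by 8 bits
// (OffsetIdx = 1), truncate to i8 and broadcast the result.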
12409 /// Test whether this can be lowered with a single SHUFPS instruction.
12411 /// This is used to disable more specialized lowerings when the shufps lowering
12412 /// will happen to be efficient.
12413 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12414 // This routine only handles 128-bit shufps.
12415 assert(Mask.size() == 4 && "Unsupported mask size!");
12416 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12417 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12418 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12419 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12421 // To lower with a single SHUFPS we need to have the low half and high half
12422 // each requiring a single input.
12423 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12425 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
/// If we are extracting two 128-bit halves of a vector and shuffling the
/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
/// multi-shuffle lowering.
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  EVT VT = N0.getValueType();
  assert((VT.is128BitVector() &&
          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
         "VPERM* family of shuffles requires 32-bit or 64-bit elements");

  // Check that both sources are extracts of the same source vector.
  if (!N0.hasOneUse() || !N1.hasOneUse() ||
      N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N0.getOperand(0) != N1.getOperand(0))
    return SDValue();

  SDValue WideVec = N0.getOperand(0);
  EVT WideVT = WideVec.getValueType();
  if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
      !isa<ConstantSDNode>(N1.getOperand(1)))
    return SDValue();

  // Match extracts of each half of the wide source vector. Commute the shuffle
  // if the extract of the low half is N1.
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
    ShuffleVectorSDNode::commuteMask(NewMask);
  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
    return SDValue();

  // Final bailout: if the mask is simple, we are better off using an extract
  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
  // because that avoids a constant load from memory.
  if (NumElts == 4 &&
      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
    return SDValue();

  // Extend the shuffle mask with undef elements.
  NewMask.append(NumElts, -1);

  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
                                      NewMask);
  // This is free: ymm -> xmm.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
                     DAG.getIntPtrConstant(0, DL));
}

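// NOTE (illustrative, not in the original source): e.g. shuffling
// (extract_subvector %y, 0) against (extract_subvector %y, 4) with a v4i32
// mask becomes one 8-element shuffle of the 256-bit %y (expected to lower to
// a single VPERMPS/VPERMD/VPERMQ under AVX2), followed by a free ymm->xmm
// subvector extract.
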
/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
        (Subtarget.hasAVX2() && VT.isInteger())))
    return SDValue();

  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
  // we can only broadcast from a register with AVX2.
  unsigned NumElts = Mask.size();
  unsigned NumEltBits = VT.getScalarSizeInBits();
  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
                        ? X86ISD::MOVDDUP
                        : X86ISD::VBROADCAST;
  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();

  // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int i = 0; i != (int)NumElts; ++i) {
    SmallVector<int, 8> BroadcastMask(NumElts, i);
    if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
      BroadcastIdx = i;
      break;
    }
  }

  if (BroadcastIdx < 0)
    return SDValue();
  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to find a scalar load that we can
  // combine with the broadcast.
  int BitOffset = BroadcastIdx * NumEltBits;
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::BITCAST: {
      V = V.getOperand(0);
      continue;
    }
    case ISD::CONCAT_VECTORS: {
      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
      int OpIdx = BitOffset / OpBitWidth;
      V = V.getOperand(OpIdx);
      BitOffset %= OpBitWidth;
      continue;
    }
    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int EltBitWidth = VOuter.getScalarValueSizeInBits();
      int Idx = (int)ConstantIdx->getZExtValue();
      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
      int BeginOffset = Idx * EltBitWidth;
      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
        BitOffset -= BeginOffset;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
  BroadcastIdx = BitOffset / NumEltBits;

  // Do we need to bitcast the source to retrieve the original broadcast index?
  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  // If the original value has a larger element type than the shuffle, the
  // broadcast element is in essence truncated. Make that explicit to ease
  // folding.
  if (BitCastSrc && VT.isInteger())
    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
            DL, VT, V, BroadcastIdx, Subtarget, DAG))
      return TruncBroadcast;

  MVT BroadcastVT = VT;

  // Also check the simpler case, where we can directly reuse the scalar.
  if (!BitCastSrc &&
      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
    V = V.getOperand(BroadcastIdx);

    // If we can't broadcast from a register, check that the input is a load.
    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
    // 32-bit targets need to load i64 as a f64 and then bitcast the result.
    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
      BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
      Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
                   ? X86ISD::MOVDDUP
                   : Opcode;
    }

    // If we are broadcasting a load that is only used by the shuffle
    // then we can reduce the vector load to the broadcasted scalar load.
    LoadSDNode *Ld = cast<LoadSDNode>(V);
    SDValue BaseAddr = Ld->getOperand(1);
    EVT SVT = BroadcastVT.getScalarType();
    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
    SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                    DAG.getMachineFunction().getMachineMemOperand(
                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
    DAG.makeEquivalentMemoryOrdering(Ld, V);
  } else if (!BroadcastFromReg) {
    // We can't broadcast from a vector register.
    return SDValue();
  } else if (BitOffset != 0) {
    // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
    if (!VT.is256BitVector() && !VT.is512BitVector())
      return SDValue();

    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
    if (VT == MVT::v4f64 || VT == MVT::v4i64)
      return SDValue();

    // Only broadcast the zero-element of a 128-bit subvector.
    if ((BitOffset % 128) != 0)
      return SDValue();

    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
           "Unexpected bit-offset");
    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
           "Unexpected vector size");
    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
    V = extract128BitVector(V, ExtractIdx, DAG, DL);
  }

  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                    DAG.getBitcast(MVT::f64, V));

  // Bitcast back to the same scalar type as BroadcastVT.
  if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
    assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
           "Unexpected vector element size");
    MVT ExtVT;
    if (V.getValueType().isVector()) {
      unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
      ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
    } else {
      ExtVT = BroadcastVT.getScalarType();
    }
    V = DAG.getBitcast(ExtVT, V);
  }

  // 32-bit targets need to load i64 as a f64 and then bitcast the result.
  if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
    V = DAG.getBitcast(MVT::f64, V);
    unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
    BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
  }

  // We only support broadcasting from 128-bit vectors to minimize the
  // number of patterns we need to deal with in isel. So extract down to
  // 128-bits, removing as many bitcasts as possible.
  if (V.getValueSizeInBits() > 128) {
    MVT ExtVT = V.getSimpleValueType().getScalarType();
    ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
    V = DAG.getBitcast(ExtVT, V);
  }

  return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}

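// NOTE (illustrative, not in the original source): a worked example of the
// BitOffset walk above: broadcasting element 5 of a v8i32 built as
// (concat_vectors v4i32 %a, v4i32 %b) starts with BitOffset = 160; the
// CONCAT_VECTORS step selects %b and reduces BitOffset to 32, i.e. element 1
// of %b, which is where any foldable scalar load would be found.
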
// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // will be updated.
  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
                             ArrayRef<int> CandidateMask) {
    unsigned ZMask = 0;
    int VADstIndex = -1;
    int VBDstIndex = -1;
    bool VAUsedInPlace = false;

    for (int i = 0; i < 4; ++i) {
      // Synthesize a zero mask from the zeroable elements (includes undefs).
      if (Zeroable[i]) {
        ZMask |= 1 << i;
        continue;
      }

      // Flag if we use any VA inputs in place.
      if (i == CandidateMask[i]) {
        VAUsedInPlace = true;
        continue;
      }

      // We can only insert a single non-zeroable element.
      if (VADstIndex >= 0 || VBDstIndex >= 0)
        return false;

      if (CandidateMask[i] < 4) {
        // VA input out of place for insertion.
        VADstIndex = i;
      } else {
        // VB input for insertion.
        VBDstIndex = i;
      }
    }

    // Don't bother if we have no (non-zeroable) element for insertion.
    if (VADstIndex < 0 && VBDstIndex < 0)
      return false;

    // Determine element insertion src/dst indices. The src index is from the
    // start of the inserted vector, not the start of the concatenated vector.
    unsigned VBSrcIndex = 0;
    if (VADstIndex >= 0) {
      // If we have a VA input out of place, we use VA as the V2 element
      // insertion and don't use the original V2 at all.
      VBSrcIndex = CandidateMask[VADstIndex];
      VBDstIndex = VADstIndex;
      VB = VA;
    } else {
      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
    }

    // If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion - so remove V1 dependency.
    if (!VAUsedInPlace)
      VA = DAG.getUNDEF(MVT::v4f32);

    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;

    // Insert the V2 element into the desired position.
    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };

  if (matchAsInsertPS(V1, V2, Mask))
    return true;

  // Commute and try again.
  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
  ShuffleVectorSDNode::commuteMask(CommutedMask);
  if (matchAsInsertPS(V2, V1, CommutedMask))
    return true;

  return false;
}

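// NOTE (for reference, not in the original source): the INSERTPS immediate
// built above follows the hardware encoding: bits [7:6] select the source
// element of the second operand, bits [5:4] select the destination element,
// and bits [3:0] are a mask of destination elements to zero.
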
static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");

  // Attempt to match the insertps pattern.
  unsigned InsertPSMask;
  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
    return SDValue();

  // Insert the V2 element into the desired position.
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
}

/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
static SDValue lowerShuffleAsPermuteAndUnpack(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() &&
         "This routine only supports integer vectors.");
  assert(VT.is128BitVector() &&
         "This routine only works on 128-bit vectors.");
  assert(!V2.isUndef() &&
         "This routine should only be used when blending two inputs.");
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  int Size = Mask.size();

  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

  auto TryUnpack = [&](int ScalarSize, int Scale) {
    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
    SmallVector<int, 16> V2Mask((unsigned)Size, -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
    V1 = DAG.getBitcast(UnpackVT, V1);
    V2 = DAG.getBitcast(UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getBitcast(
        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                        UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;

  // If we're shuffling with a zero vector then we're better off not doing
  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
      ISD::isBuildVectorAllZeros(V2.getNode()))
    return SDValue();

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask((unsigned)Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}

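// NOTE (illustrative, not in the original source): the v4i32 mask {1, 5, 0, 4}
// alternates between the two inputs but is not UNPCKL directly ({0, 4, 1, 5}
// would be). Permuting both V1 and V2 with {1, 0, u, u} first turns the
// interleave into a plain UNPCKLDQ, which is what TryUnpack above computes.
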
/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
    }

    return DAG.getNode(
        X86ISD::SHUFP, DL, MVT::v2f64,
        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
  }
  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
      isShuffleEquivalent(V1, V2, Mask, {1, 3}))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          X86ISD::MOVSD, DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
    return V;

  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
}

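// NOTE (for reference, not in the original source): in the SHUFPD immediate
// used above, bit 0 selects which element of the first operand supplies
// result lane 0 and bit 1 selects which element of the second operand
// supplies result lane 1; e.g. the two-input mask {1, 2} encodes as 0b01.
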
/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
    V1 = DAG.getBitcast(MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getBitcast(
        MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getBitcast(MVT::v2f64, V1);
  V2 = DAG.getBitcast(MVT::v2f64, V2);
  return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}

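// NOTE (illustrative, not in the original source): in the single-input path
// above, the v2i64 mask {1, 0} widens to the v4i32 PSHUFD mask {2, 3, 0, 1},
// swapping the two quadwords without leaving the integer domain.
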
/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] < 0) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // a blend.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}

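// NOTE (for reference, not in the original source): a SHUFPS immediate is four
// 2-bit fields; fields 0-1 pick result lanes 0-1 from the first operand (LowV
// here) and fields 2-3 pick result lanes 2-3 from the second operand (HighV),
// which is why NewMask must end up needing at most one input per half.
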
/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (Subtarget.hasSSE3()) {
      if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
    }

    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
    // in SSE1 because otherwise they are widened to v2f64 and never get here.
    if (!Subtarget.hasSSE2()) {
      if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
      if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // There are special ways we can lower some single-element blends. However, we
  // have custom ways we can lower more complex single-element blends below that
  // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (Subtarget.hasSSE41()) {
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
      return V;

    if (!isSingleSHUFPSMask(Mask))
      if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
                                                            V2, Mask, DAG))
        return BlendPerm;
  }

  // Use low/high mov instructions. These are only valid in SSE1 because
  // otherwise they are widened to v2f64 and never get here.
  if (!Subtarget.hasSSE2()) {
    if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
    if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
    return V;

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}

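// NOTE (illustrative, not in the original source): the unary masks
// {0, 0, 2, 2} and {1, 1, 3, 3} handled above duplicate the even or odd
// elements; unlike the SHUFPS fallback, MOVSLDUP/MOVSHDUP take a single
// operand and so can also fold a load from memory.
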
/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Try to use broadcast unless the mask only has one non-undef element.
    if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Broadcast;
    }

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (!isSingleSHUFPSMask(Mask)) {
    // If we have direct support for blends, we should lower by decomposing into
    // a permute. That will be faster than the domain cross.
    if (IsBlendSupported)
      return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG);

    // Try to lower by permuting the inputs into an unpack instruction.
    if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
                                                        Mask, Subtarget, DAG))
      return Unpack;
  }

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // faster anyway.
  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
  return DAG.getBitcast(MVT::v4i32, ShufPS);
}

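// NOTE (illustrative, not in the original source): absent zeroable elements or
// other foldable patterns, the two-input v4i32 mask {0, 2, 4, 6} keeps each
// result half on a single input, so isSingleSHUFPSMask succeeds and the code
// above skips the decomposition, casting to v4f32 and emitting one SHUFPS.
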
13385 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13386 /// shuffle lowering, and the most complex part.
13388 /// The lowering strategy is to try to form pairs of input lanes which are
13389 /// targeted at the same half of the final vector, and then use a dword shuffle
13390 /// to place them onto the right half, and finally unpack the paired lanes into
13391 /// their final position.
13393 /// The exact breakdown of how to form these dword pairs and align them on the
13394 /// correct sides is really tricky. See the comments within the function for
13395 /// more of the details.
13397 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13398 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13399 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13400 /// vector, form the analogous 128-bit 8-element Mask.
13401 static SDValue lowerV8I16GeneralSingleInputShuffle(
13402 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13403 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13404 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13405 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13407 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13408 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13409 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13411 // Attempt to directly match PSHUFLW or PSHUFHW.
13412 if (isUndefOrInRange(LoMask, 0, 4) &&
13413 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13414 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13415 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13417 if (isUndefOrInRange(HiMask, 4, 8) &&
13418 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13419 for (int i = 0; i != 4; ++i)
13420 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13421 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13422 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13425 SmallVector<int, 4> LoInputs;
13426 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13427 array_pod_sort(LoInputs.begin(), LoInputs.end());
13428 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13429 SmallVector<int, 4> HiInputs;
13430 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13431 array_pod_sort(HiInputs.begin(), HiInputs.end());
13432 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13433 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13434 int NumHToL = LoInputs.size() - NumLToL;
13435 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13436 int NumHToH = HiInputs.size() - NumLToH;
13437 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13438 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13439 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13440 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13442 // If we are shuffling values from one half - check how many different DWORD
13443 // pairs we need to create. If only 1 or 2 then we can perform this as a
13444 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13445 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13446 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13447 V = DAG.getNode(ShufWOp, DL, VT, V,
13448 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13449 V = DAG.getBitcast(PSHUFDVT, V);
13450 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13451 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13452 return DAG.getBitcast(VT, V);
13455 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13456 int PSHUFDMask[4] = { -1, -1, -1, -1 };
13457 SmallVector<std::pair<int, int>, 4> DWordPairs;
13458 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13460 // Collect the different DWORD pairs.
13461 for (int DWord = 0; DWord != 4; ++DWord) {
13462 int M0 = Mask[2 * DWord + 0];
13463 int M1 = Mask[2 * DWord + 1];
13464 M0 = (M0 >= 0 ? M0 % 4 : M0);
13465 M1 = (M1 >= 0 ? M1 % 4 : M1);
13466 if (M0 < 0 && M1 < 0)
13469 bool Match = false;
13470 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13471 auto &DWordPair = DWordPairs[j];
13472 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13473 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13474 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13475 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13476 PSHUFDMask[DWord] = DOffset + j;
13482 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13483 DWordPairs.push_back(std::make_pair(M0, M1));
13487 if (DWordPairs.size() <= 2) {
13488 DWordPairs.resize(2, std::make_pair(-1, -1));
13489 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13490 DWordPairs[1].first, DWordPairs[1].second};
13491 if ((NumHToL + NumHToH) == 0)
13492 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13493 if ((NumLToL + NumLToH) == 0)
13494 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13498 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13499 // such inputs we can swap two of the dwords across the half mark and end up
13500 // with <=2 inputs to each half in each half. Once there, we can fall through
13501 // to the generic code below. For example:
13503 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13504 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13506 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13507 // and an existing 2-into-2 on the other half. In this case we may have to
13508 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13509 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13510 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13511 // because any other situation (including a 3-into-1 or 1-into-3 in the other
13512 // half than the one we target for fixing) will be fixed when we re-enter this
13513 // path. We will also combine away any sequence of PSHUFD instructions that
13514 // result into a single instruction. Here is an example of the tricky case:
13516 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13517 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13519 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13521 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13522 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13524 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13525 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13527 // The result is fine to be handled by the generic logic.
13528 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13529 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13530 int AOffset, int BOffset) {
13531 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13532 "Must call this with A having 3 or 1 inputs from the A half.");
13533 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13534 "Must call this with B having 1 or 3 inputs from the B half.");
13535 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13536 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13538 bool ThreeAInputs = AToAInputs.size() == 3;
13540 // Compute the index of dword with only one word among the three inputs in
13541 // a half by taking the sum of the half with three inputs and subtracting
13542 // the sum of the actual three inputs. The difference is the remaining
13544 int ADWord = 0, BDWord = 0;
13545 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13546 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13547 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13548 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13549 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13550 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13551 int TripleNonInputIdx =
13552 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13553 TripleDWord = TripleNonInputIdx / 2;
13555 // We use xor with one to compute the adjacent DWord to whichever one the
13557 OneInputDWord = (OneInput / 2) ^ 1;
13559 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13560 // and BToA inputs. If there is also such a problem with the BToB and AToB
13561 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13562 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13563 // is essential that we don't *create* a 3<-1 as then we might oscillate.
13564 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13565 // Compute how many inputs will be flipped by swapping these DWords. We
13567 // to balance this to ensure we don't form a 3-1 shuffle in the other
13569 int NumFlippedAToBInputs =
13570 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13571 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13572 int NumFlippedBToBInputs =
13573 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13574 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13575 if ((NumFlippedAToBInputs == 1 &&
13576 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13577 (NumFlippedBToBInputs == 1 &&
13578 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13579 // We choose whether to fix the A half or B half based on whether that
13580 // half has zero flipped inputs. At zero, we may not be able to fix it
13581 // with that half. We also bias towards fixing the B half because that
13582 // will more commonly be the high half, and we have to bias one way.
13583 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13584 ArrayRef<int> Inputs) {
13585 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13586 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13587 // Determine whether the free index is in the flipped dword or the
13588 // unflipped dword based on where the pinned index is. We use this bit
13589 // in an xor to conditionally select the adjacent dword.
13590 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13591 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13592 if (IsFixIdxInput == IsFixFreeIdxInput)
13594 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13595 assert(IsFixIdxInput != IsFixFreeIdxInput &&
13596 "We need to be changing the number of flipped inputs!");
13597 int PSHUFHalfMask[] = {0, 1, 2, 3};
13598 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13600 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13601 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13602 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13604 for (int &M : Mask)
13605 if (M >= 0 && M == FixIdx)
13607 else if (M >= 0 && M == FixFreeIdx)
13610 if (NumFlippedBToBInputs != 0) {
13612 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13613 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13615 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13616 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13617 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13622 int PSHUFDMask[] = {0, 1, 2, 3};
13623 PSHUFDMask[ADWord] = BDWord;
13624 PSHUFDMask[BDWord] = ADWord;
13625 V = DAG.getBitcast(
13627 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13628 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13630 // Adjust the mask to match the new locations of A and B.
13631 for (int &M : Mask)
13632 if (M >= 0 && M/2 == ADWord)
13633 M = 2 * BDWord + M % 2;
13634 else if (M >= 0 && M/2 == BDWord)
13635 M = 2 * ADWord + M % 2;
13637 // Recurse back into this routine to re-compute state now that this isn't
13638 // a 3 and 1 problem.
13639 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13641 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13642 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13643 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13644 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13646 // At this point there are at most two inputs to the low and high halves from
13647 // each half. That means the inputs can always be grouped into dwords and
13648 // those dwords can then be moved to the correct half with a dword shuffle.
13649 // We use at most one low and one high word shuffle to collect these paired
13650 // inputs into dwords, and finally a dword shuffle to place them.
13651 int PSHUFLMask[4] = {-1, -1, -1, -1};
13652 int PSHUFHMask[4] = {-1, -1, -1, -1};
13653 int PSHUFDMask[4] = {-1, -1, -1, -1};
13655 // First fix the masks for all the inputs that are staying in their
13656 // original halves. This will then dictate the targets of the cross-half
13658 auto fixInPlaceInputs =
13659 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13660 MutableArrayRef<int> SourceHalfMask,
13661 MutableArrayRef<int> HalfMask, int HalfOffset) {
13662 if (InPlaceInputs.empty())
13664 if (InPlaceInputs.size() == 1) {
13665 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13666 InPlaceInputs[0] - HalfOffset;
13667 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13670 if (IncomingInputs.empty()) {
13671 // Just fix all of the in place inputs.
13672 for (int Input : InPlaceInputs) {
13673 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13674 PSHUFDMask[Input / 2] = Input / 2;
13679 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13680 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13681 InPlaceInputs[0] - HalfOffset;
13682 // Put the second input next to the first so that they are packed into
13683 // a dword. We find the adjacent index by toggling the low bit.
13684 int AdjIndex = InPlaceInputs[0] ^ 1;
13685 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13686 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13687 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13689 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13690 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13692 // Now gather the cross-half inputs and place them into a free dword of
13693 // their target half.
13694 // FIXME: This operation could almost certainly be simplified dramatically to
13695 // look more like the 3-1 fixing operation.
13696 auto moveInputsToRightHalf = [&PSHUFDMask](
13697 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13698 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13699 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13701 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13702 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13704 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13706 int LowWord = Word & ~1;
13707 int HighWord = Word | 1;
13708 return isWordClobbered(SourceHalfMask, LowWord) ||
13709 isWordClobbered(SourceHalfMask, HighWord);
13712 if (IncomingInputs.empty())
13715 if (ExistingInputs.empty()) {
13716 // Map any dwords with inputs from them into the right half.
13717 for (int Input : IncomingInputs) {
13718 // If the source half mask maps over the inputs, turn those into
13719 // swaps and use the swapped lane.
13720 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13721 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13722 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13723 Input - SourceOffset;
13724 // We have to swap the uses in our half mask in one sweep.
13725 for (int &M : HalfMask)
13726 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13728 else if (M == Input)
13729 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13731 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13732 Input - SourceOffset &&
13733 "Previous placement doesn't match!");
13735 // Note that this correctly re-maps both when we do a swap and when
13736 // we observe the other side of the swap above. We rely on that to
13737 // avoid swapping the members of the input list directly.
13738 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13741 // Map the input's dword into the correct half.
13742 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13743 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13745 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13747 "Previous placement doesn't match!");
13750 // And just directly shift any other-half mask elements to be same-half
13751 // as we will have mirrored the dword containing the element into the
13752 // same position within that half.
13753 for (int &M : HalfMask)
13754 if (M >= SourceOffset && M < SourceOffset + 4) {
13755 M = M - SourceOffset + DestOffset;
13756 assert(M >= 0 && "This should never wrap below zero!");
13761 // Ensure we have the input in a viable dword of its current half. This
13762 // is particularly tricky because the original position may be clobbered
13763 // by inputs being moved and *staying* in that half.
13764 if (IncomingInputs.size() == 1) {
13765 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13766 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13768 SourceHalfMask[InputFixed - SourceOffset] =
13769 IncomingInputs[0] - SourceOffset;
13770 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13772 IncomingInputs[0] = InputFixed;
13774 } else if (IncomingInputs.size() == 2) {
13775 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13776 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13777 // We have two non-adjacent or clobbered inputs we need to extract from
13778 // the source half. To do this, we need to map them into some adjacent
13779 // dword slot in the source mask.
13780 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13781 IncomingInputs[1] - SourceOffset};
13783 // If there is a free slot in the source half mask adjacent to one of
13784 // the inputs, place the other input in it. We use (Index XOR 1) to
13785 // compute an adjacent index.
13786 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13787 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13788 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13789 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13790 InputsFixed[1] = InputsFixed[0] ^ 1;
13791 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13792 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13793 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13794 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13795 InputsFixed[0] = InputsFixed[1] ^ 1;
13796 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13797 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13798 // The two inputs are in the same DWord but it is clobbered and the
13799 // adjacent DWord isn't used at all. Move both inputs to the free
13801 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13802 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13803 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13804 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
13806 // The only way we hit this point is if there is no clobbering
13807 // (because there are no off-half inputs to this half) and there is no
13808 // free slot adjacent to one of the inputs. In this case, we have to
13809 // swap an input with a non-input.
13810 for (int i = 0; i < 4; ++i)
13811 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13812 "We can't handle any clobbers here!");
13813 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13814 "Cannot have adjacent inputs here!");
13816 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13817 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13819 // We also have to update the final source mask in this case because
13820 // it may need to undo the above swap.
13821 for (int &M : FinalSourceHalfMask)
13822 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13823 M = InputsFixed[1] + SourceOffset;
13824 else if (M == InputsFixed[1] + SourceOffset)
13825 M = (InputsFixed[0] ^ 1) + SourceOffset;
13827 InputsFixed[1] = InputsFixed[0] ^ 1;
13828 }
13830 // Point everything at the fixed inputs.
13831 for (int &M : HalfMask)
13832 if (M == IncomingInputs[0])
13833 M = InputsFixed[0] + SourceOffset;
13834 else if (M == IncomingInputs[1])
13835 M = InputsFixed[1] + SourceOffset;
13837 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13838 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
13839 }
13840 } else {
13841 llvm_unreachable("Unhandled input size!");
13842 }
13844 // Now hoist the DWord down to the right half.
13845 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13846 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13847 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13848 for (int &M : HalfMask)
13849 for (int Input : IncomingInputs)
13850 if (M == Input)
13851 M = FreeDWord * 2 + Input % 2;
13852 };
13853 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13854 /*SourceOffset*/ 4, /*DestOffset*/ 0);
13855 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13856 /*SourceOffset*/ 0, /*DestOffset*/ 4);
13858 // Now enact all the shuffles we've computed to move the inputs into their
13859 // target half.
13860 if (!isNoopShuffleMask(PSHUFLMask))
13861 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13862 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13863 if (!isNoopShuffleMask(PSHUFHMask))
13864 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13865 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13866 if (!isNoopShuffleMask(PSHUFDMask))
13867 V = DAG.getBitcast(
13868 VT,
13869 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13870 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13872 // At this point, each half should contain all its inputs, and we can then
13873 // just shuffle them into their final position.
13874 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13875 "Failed to lift all the high half inputs to the low mask!");
13876 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13877 "Failed to lift all the low half inputs to the high mask!");
13879 // Do a half shuffle for the low mask.
13880 if (!isNoopShuffleMask(LoMask))
13881 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13882 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13884 // Do a half shuffle with the high mask after shifting its values down.
13885 for (int &M : HiMask)
13886 if (M >= 0)
13887 M -= 4;
13888 if (!isNoopShuffleMask(HiMask))
13889 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13890 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13892 return V;
13893 }
13895 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13896 /// blend if only one input is used.
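/// As an illustrative sketch (mask chosen here for exposition, not taken
/// from any caller): lowering the v8i16 interleave mask <0,8,1,9,2,10,3,11>
/// with Scale = 2 builds the byte masks V1Mask = {0,1,0x80,0x80,2,3,...}
/// and V2Mask = {0x80,0x80,0,1,...}; index 0x80 makes PSHUFB zero that
/// byte, so OR-ing the two PSHUFB results completes the blend.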
13897 static SDValue lowerShuffleAsBlendOfPSHUFBs(
13898 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13899 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13900 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13901 "Lane crossing shuffle masks not supported");
13903 int NumBytes = VT.getSizeInBits() / 8;
13904 int Size = Mask.size();
13905 int Scale = NumBytes / Size;
13907 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13908 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13910 V1InUse = false;
13911 V2InUse = false;
13912 for (int i = 0; i < NumBytes; ++i) {
13913 int M = Mask[i / Scale];
13914 if (M < 0)
13915 continue;
13917 const int ZeroMask = 0x80;
13918 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13919 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13920 if (Zeroable[i / Scale])
13921 V1Idx = V2Idx = ZeroMask;
13923 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13924 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13925 V1InUse |= (ZeroMask != V1Idx);
13926 V2InUse |= (ZeroMask != V2Idx);
13927 }
13929 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
13930 if (V1InUse)
13931 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13932 DAG.getBuildVector(ShufVT, DL, V1Mask));
13933 if (V2InUse)
13934 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13935 DAG.getBuildVector(ShufVT, DL, V2Mask));
13937 // If we need shuffled inputs from both, blend the two.
13938 SDValue V;
13939 if (V1InUse && V2InUse)
13940 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
13941 else
13942 V = V1InUse ? V1 : V2;
13944 // Cast the result back to the correct type.
13945 return DAG.getBitcast(VT, V);
13946 }
13948 /// Generic lowering of 8-lane i16 shuffles.
13950 /// This handles both single-input shuffles and combined shuffle/blends with
13951 /// two inputs. The single input shuffles are immediately delegated to
13952 /// a dedicated lowering routine.
13954 /// The blends are lowered in one of three fundamental ways. If there are few
13955 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
13956 /// of the input is significantly cheaper when lowered as an interleaving of
13957 /// the two inputs, try to interleave them. Otherwise, blend the low and high
13958 /// halves of the inputs separately (making them have relatively few inputs)
13959 /// and then concatenate them.
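/// For illustration (hypothetical masks, not tied to any caller): the
/// two-input mask <0,8,1,9,2,10,3,11> is matched directly as PUNPCKLWD,
/// while <0,9,2,11,4,13,6,15> leaves every element in place and becomes a
/// single PBLENDW with immediate 0xAA on SSE4.1 targets.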
13960 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13961 const APInt &Zeroable, SDValue V1, SDValue V2,
13962 const X86Subtarget &Subtarget,
13963 SelectionDAG &DAG) {
13964 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13965 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13966 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13968 // Whenever we can lower this as a zext, that instruction is strictly faster
13969 // than any alternative.
13970 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13971 Zeroable, Subtarget, DAG))
13972 return ZExt;
13974 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13976 if (NumV2Inputs == 0) {
13977 // Try to use shift instructions.
13978 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
13979 Zeroable, Subtarget, DAG))
13980 return Shift;
13982 // Check for being able to broadcast a single element.
13983 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13984 Mask, Subtarget, DAG))
13985 return Broadcast;
13987 // Use dedicated unpack instructions for masks that match their pattern.
13988 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13989 return V;
13991 // Use dedicated pack instructions for masks that match their pattern.
13992 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13993 Subtarget))
13994 return V;
13996 // Try to use byte rotation instructions.
13997 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13998 Subtarget, DAG))
13999 return Rotate;
14001 // Make a copy of the mask so it can be modified.
14002 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
14003 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
14004 Subtarget, DAG);
14005 }
14007 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
14008 "All single-input shuffles should be canonicalized to be V1-input "
14011 // Try to use shift instructions.
14012 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
14013 Zeroable, Subtarget, DAG))
14014 return Shift;
14016 // See if we can use SSE4A Extraction / Insertion.
14017 if (Subtarget.hasSSE4A())
14018 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14019 Zeroable, DAG))
14020 return V;
14022 // There are special ways we can lower some single-element blends.
14023 if (NumV2Inputs == 1)
14024 if (SDValue V = lowerShuffleAsElementInsertion(
14025 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14026 return V;
14028 // We have different paths for blend lowering, but they all must use the
14029 // *exact* same predicate.
14030 bool IsBlendSupported = Subtarget.hasSSE41();
14031 if (IsBlendSupported)
14032 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14033 Zeroable, Subtarget, DAG))
14034 return Blend;
14036 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14037 Zeroable, Subtarget, DAG))
14038 return Masked;
14040 // Use dedicated unpack instructions for masks that match their pattern.
14041 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14042 return V;
14044 // Use dedicated pack instructions for masks that match their pattern.
14045 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14046 Subtarget))
14047 return V;
14049 // Try to use byte rotation instructions.
14050 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14051 Subtarget, DAG))
14052 return Rotate;
14054 if (SDValue BitBlend =
14055 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14056 return BitBlend;
14058 // Try to use byte shift instructions to mask.
14059 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14060 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14061 return V;
14063 // Try to lower by permuting the inputs into an unpack instruction.
14064 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14065 Mask, Subtarget, DAG))
14066 return Unpack;
14068 // If we can't directly blend but can use PSHUFB, that will be better as it
14069 // can both shuffle and set up the inefficient blend.
14070 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14071 bool V1InUse, V2InUse;
14072 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14073 Zeroable, DAG, V1InUse, V2InUse);
14074 }
14076 // We can always bit-blend if we have to so the fallback strategy is to
14077 // decompose into single-input permutes and blends.
14078 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14079 Mask, Subtarget, DAG);
14080 }
14082 /// Check whether a compaction lowering can be done by dropping even
14083 /// elements and compute how many times even elements must be dropped.
14085 /// This handles shuffles which take every (2^N)th element. Example shuffle
14086 /// masks for each supported N:
14088 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
14089 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
14090 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
14091 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
14092 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
14093 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
14095 /// Any of these lanes can of course be undef.
14097 /// This routine only supports N <= 3.
14098 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
14099 /// for improvement.
14101 /// \returns N above, or the number of times even elements must be dropped if
14102 /// there is such a number. Otherwise returns zero.
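/// As an illustrative check (values computed by hand): the two-input v16i8
/// mask 0,2,4,...,30 satisfies Mask[i] == (i << 1) & 31 for every i, so the
/// routine returns N = 1 and a single even-element drop reproduces the
/// shuffle.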
14103 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
14104 bool IsSingleInput) {
14105 // The modulus for the shuffle vector entries is based on whether this is
14106 // a single input or not.
14107 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
14108 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
14109 "We should only be called with masks with a power-of-2 size!");
14111 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
14113 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
14114 // and 2^3 simultaneously. This is because we may have ambiguity with
14115 // partially undef inputs.
14116 bool ViableForN[3] = {true, true, true};
14118 for (int i = 0, e = Mask.size(); i < e; ++i) {
14119 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
14120 // want.
14121 if (Mask[i] < 0)
14122 continue;
14124 bool IsAnyViable = false;
14125 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14126 if (ViableForN[j]) {
14127 uint64_t N = j + 1;
14129 // The shuffle mask must be equal to (i * 2^N) % M.
14130 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
14131 IsAnyViable = true;
14132 else
14133 ViableForN[j] = false;
14134 }
14135 // Early exit if we exhaust the possible powers of two.
14136 if (!IsAnyViable)
14137 break;
14138 }
14140 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14141 if (ViableForN[j])
14142 return j + 1;
14144 // Return 0 as there is no viable power of two.
14145 return 0;
14146 }
14148 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14149 ArrayRef<int> Mask, SDValue V1,
14150 SDValue V2, SelectionDAG &DAG) {
14151 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14152 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14154 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14155 if (V2.isUndef())
14156 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14158 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14159 }
14161 /// Generic lowering of v16i8 shuffles.
14163 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14164 /// detect any complexity reducing interleaving. If that doesn't help, it uses
14165 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14166 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14167 /// back together.
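/// To make the strategy concrete (illustrative masks, not from any caller):
/// the interleave mask <0,16,1,17,...,7,23> is matched as a direct
/// PUNPCKLBW well before the PSHUFB stage, while an irregular two-input
/// mask such as <0,16,3,19,...> typically ends up as two PSHUFBs plus an OR.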
14168 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14169 const APInt &Zeroable, SDValue V1, SDValue V2,
14170 const X86Subtarget &Subtarget,
14171 SelectionDAG &DAG) {
14172 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14173 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14174 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14176 // Try to use shift instructions.
14177 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14178 Zeroable, Subtarget, DAG))
14179 return Shift;
14181 // Try to use byte rotation instructions.
14182 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14183 Subtarget, DAG))
14184 return Rotate;
14186 // Use dedicated pack instructions for masks that match their pattern.
14187 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14188 Subtarget))
14189 return V;
14191 // Try to use a zext lowering.
14192 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14193 Zeroable, Subtarget, DAG))
14194 return ZExt;
14196 // See if we can use SSE4A Extraction / Insertion.
14197 if (Subtarget.hasSSE4A())
14198 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14199 Zeroable, DAG))
14200 return V;
14202 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14204 // For single-input shuffles, there are some nicer lowering tricks we can use.
14205 if (NumV2Elements == 0) {
14206 // Check for being able to broadcast a single element.
14207 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14208 Mask, Subtarget, DAG))
14209 return Broadcast;
14211 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14212 return V;
14214 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14215 // Notably, this handles splat and partial-splat shuffles more efficiently.
14216 // However, it only makes sense if the pre-duplication shuffle simplifies
14217 // things significantly. Currently, this means we need to be able to
14218 // express the pre-duplication shuffle as an i16 shuffle.
14220 // FIXME: We should check for other patterns which can be widened into an
14221 // i16 shuffle as well.
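// A worked example (illustrative): for the pre-AVX2 byte splat <5,5,...,5>
// the pre-duplication i16 shuffle is an identity (word 2 already holds
// byte 5), the UNPCKL below then duplicates byte 5 into both bytes of a
// word, and the final v8i16 shuffle splats that word - no PSHUFB needed.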
14222 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14223 for (int i = 0; i < 16; i += 2)
14224 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14225 return false;
14227 return true;
14228 };
14229 auto tryToWidenViaDuplication = [&]() -> SDValue {
14230 if (!canWidenViaDuplication(Mask))
14231 return SDValue();
14232 SmallVector<int, 4> LoInputs;
14233 copy_if(Mask, std::back_inserter(LoInputs),
14234 [](int M) { return M >= 0 && M < 8; });
14235 array_pod_sort(LoInputs.begin(), LoInputs.end());
14236 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14237 LoInputs.end());
14238 SmallVector<int, 4> HiInputs;
14239 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14240 array_pod_sort(HiInputs.begin(), HiInputs.end());
14241 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14242 HiInputs.end());
14244 bool TargetLo = LoInputs.size() >= HiInputs.size();
14245 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14246 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14248 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14249 SmallDenseMap<int, int, 8> LaneMap;
14250 for (int I : InPlaceInputs) {
14251 PreDupI16Shuffle[I/2] = I/2;
14252 LaneMap[I] = I;
14253 }
14254 int j = TargetLo ? 0 : 4, je = j + 4;
14255 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14256 // Check if j is already a shuffle of this input. This happens when
14257 // there are two adjacent bytes after we move the low one.
14258 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14259 // If we haven't yet mapped the input, search for a slot into which
14260 // we can map it.
14261 while (j < je && PreDupI16Shuffle[j] >= 0)
14262 ++j;
14264 if (j == je)
14265 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14266 return SDValue();
14268 // Map this input with the i16 shuffle.
14269 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14270 }
14272 // Update the lane map based on the mapping we ended up with.
14273 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14274 }
14275 V1 = DAG.getBitcast(
14276 MVT::v16i8,
14277 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14278 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14280 // Unpack the bytes to form the i16s that will be shuffled into place.
14281 bool EvenInUse = false, OddInUse = false;
14282 for (int i = 0; i < 16; i += 2) {
14283 EvenInUse |= (Mask[i + 0] >= 0);
14284 OddInUse |= (Mask[i + 1] >= 0);
14285 if (EvenInUse && OddInUse)
14286 break;
14287 }
14288 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14289 MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14290 OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14292 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14293 for (int i = 0; i < 16; ++i)
14294 if (Mask[i] >= 0) {
14295 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14296 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14297 if (PostDupI16Shuffle[i / 2] < 0)
14298 PostDupI16Shuffle[i / 2] = MappedMask;
14299 else
14300 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14301 "Conflicting entries in the original shuffle!");
14302 }
14303 return DAG.getBitcast(
14304 MVT::v16i8,
14305 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14306 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14307 };
14308 if (SDValue V = tryToWidenViaDuplication())
14309 return V;
14310 }
14312 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14313 Zeroable, Subtarget, DAG))
14314 return Masked;
14316 // Use dedicated unpack instructions for masks that match their pattern.
14317 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14318 return V;
14320 // Try to use byte shift instructions to mask.
14321 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14322 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14323 return V;
14325 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14326 // with PSHUFB. It is important to do this before we attempt to generate any
14327 // blends but after all of the single-input lowerings. If the single input
14328 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14329 // want to preserve that and we can DAG combine any longer sequences into
14330 // a PSHUFB in the end. But once we start blending from multiple inputs,
14331 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14332 // and there are *very* few patterns that would actually be faster than the
14333 // PSHUFB approach because of its ability to zero lanes.
14335 // FIXME: The only exceptions to the above are blends which are exact
14336 // interleavings with direct instructions supporting them. We currently don't
14337 // handle those well here.
14338 if (Subtarget.hasSSSE3()) {
14339 bool V1InUse = false;
14340 bool V2InUse = false;
14342 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14343 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14345 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14346 // do so. This avoids using them to handle blends-with-zero which is
14347 // important as a single pshufb is significantly faster for that.
14348 if (V1InUse && V2InUse) {
14349 if (Subtarget.hasSSE41())
14350 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14351 Zeroable, Subtarget, DAG))
14352 return Blend;
14354 // We can use an unpack to do the blending rather than an OR in some
14355 // cases. Even though the OR may be (very minorly) more efficient, we
14356 // prefer this lowering because there are common cases where part of
14357 // the complexity of the shuffles goes away when we do the final blend as
14358 // an unpack.
14359 // FIXME: It might be worth trying to detect if the unpack-feeding
14360 // shuffles will both be pshufb, in which case we shouldn't bother with
14361 // the unpack.
14362 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14363 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14364 return Unpack;
14366 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14367 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14368 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14370 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14371 // PALIGNR will be cheaper than the second PSHUFB+OR.
14372 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14373 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14374 return V;
14375 }
14377 return PSHUFB;
14378 }
14380 // There are special ways we can lower some single-element blends.
14381 if (NumV2Elements == 1)
14382 if (SDValue V = lowerShuffleAsElementInsertion(
14383 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14384 return V;
14386 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14387 return Blend;
14389 // Check whether a compaction lowering can be done. This handles shuffles
14390 // which take every Nth element, where N is a power of two. See the helper
14391 // function for details.
14392 //
14393 // We special case these as they can be particularly efficiently handled with
14394 // the PACKUSWB instruction on x86 and they show up in common patterns of
14395 // rearranging bytes to truncate wide elements.
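// A worked example (illustrative): the two-input mask <0,2,4,...,30> gives
// NumEvenDrops == 1; AND-ing each word of both inputs with 0x00FF and doing
// one PACKUS produces exactly the 16 even bytes - a truncate of the
// concatenated inputs' words to bytes.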
14396 bool IsSingleInput = V2.isUndef();
14397 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
14398 // NumEvenDrops is the power of two stride of the elements. Another way of
14399 // thinking about it is that we need to drop the even elements this many
14400 // times to get the original input.
14402 // First we need to zero all the dropped bytes.
14403 assert(NumEvenDrops <= 3 &&
14404 "No support for dropping even elements more than 3 times.");
14405 SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
14406 for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
14407 ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
14408 SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
14409 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14410 if (!IsSingleInput)
14411 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14413 // Now pack things back together.
14414 V1 = DAG.getBitcast(MVT::v8i16, V1);
14415 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14416 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14417 for (int i = 1; i < NumEvenDrops; ++i) {
14418 Result = DAG.getBitcast(MVT::v8i16, Result);
14419 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14420 }
14422 return Result;
14423 }
14425 // Handle multi-input cases by blending single-input shuffles.
14426 if (NumV2Elements > 0)
14427 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14428 Subtarget, DAG);
14430 // The fallback path for single-input shuffles widens this into two v8i16
14431 // vectors with unpacks, shuffles those, and then pulls them back together
14432 // with a pack.
14433 SDValue V = V1;
14435 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14436 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14437 for (int i = 0; i < 16; ++i)
14438 if (Mask[i] >= 0)
14439 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14441 SDValue VLoHalf, VHiHalf;
14442 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14443 // them out and avoid using UNPCK{L,H} to extract the elements of V as
14444 // i16s.
14445 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14446 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14447 // Use a mask to drop the high bytes.
14448 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14449 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14450 DAG.getConstant(0x00FF, DL, MVT::v8i16));
14452 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14453 VHiHalf = DAG.getUNDEF(MVT::v8i16);
14455 // Squash the masks to point directly into VLoHalf.
14456 for (int &M : LoBlendMask)
14457 if (M >= 0)
14458 M /= 2;
14459 for (int &M : HiBlendMask)
14460 if (M >= 0)
14461 M /= 2;
14462 } else {
14463 // Otherwise just unpack the low half of V into VLoHalf and the high half into
14464 // VHiHalf so that we can blend them as i16s.
14465 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14467 VLoHalf = DAG.getBitcast(
14468 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14469 VHiHalf = DAG.getBitcast(
14470 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14471 }
14473 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14474 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14476 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14477 }
14479 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14481 /// This routine breaks down the specific type of 128-bit shuffle and
14482 /// dispatches to the lowering routines accordingly.
14483 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14484 MVT VT, SDValue V1, SDValue V2,
14485 const APInt &Zeroable,
14486 const X86Subtarget &Subtarget,
14487 SelectionDAG &DAG) {
14488 switch (VT.SimpleTy) {
14489 case MVT::v2i64:
14490 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14491 case MVT::v2f64:
14492 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14493 case MVT::v4i32:
14494 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14495 case MVT::v4f32:
14496 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14497 case MVT::v8i16:
14498 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14499 case MVT::v16i8:
14500 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14502 default:
14503 llvm_unreachable("Unimplemented!");
14504 }
14505 }
14507 /// Generic routine to split vector shuffle into half-sized shuffles.
14509 /// This routine just extracts two subvectors, shuffles them independently, and
14510 /// then concatenates them back together. This should work effectively with all
14511 /// AVX vector shuffle types.
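/// For illustration (hypothetical case): a v8f32 shuffle whose mask never
/// moves elements across the 128-bit halves splits into two independent
/// v4f32 shuffles plus one CONCAT_VECTORS, avoiding cross-lane instructions
/// entirely.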
14512 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14513 SDValue V2, ArrayRef<int> Mask,
14514 SelectionDAG &DAG) {
14515 assert(VT.getSizeInBits() >= 256 &&
14516 "Only for 256-bit or wider vector shuffles!");
14517 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14518 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14520 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14521 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14523 int NumElements = VT.getVectorNumElements();
14524 int SplitNumElements = NumElements / 2;
14525 MVT ScalarVT = VT.getVectorElementType();
14526 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
14528 // Rather than splitting build-vectors, just build two narrower build
14529 // vectors. This helps shuffling with splats and zeros.
14530 auto SplitVector = [&](SDValue V) {
14531 V = peekThroughBitcasts(V);
14533 MVT OrigVT = V.getSimpleValueType();
14534 int OrigNumElements = OrigVT.getVectorNumElements();
14535 int OrigSplitNumElements = OrigNumElements / 2;
14536 MVT OrigScalarVT = OrigVT.getVectorElementType();
14537 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14539 SDValue LoV, HiV;
14541 auto *BV = dyn_cast<BuildVectorSDNode>(V);
14542 if (!BV) {
14543 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14544 DAG.getIntPtrConstant(0, DL));
14545 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14546 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
14547 } else {
14549 SmallVector<SDValue, 16> LoOps, HiOps;
14550 for (int i = 0; i < OrigSplitNumElements; ++i) {
14551 LoOps.push_back(BV->getOperand(i));
14552 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14553 }
14554 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14555 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14556 }
14557 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14558 DAG.getBitcast(SplitVT, HiV));
14559 };
14561 SDValue LoV1, HiV1, LoV2, HiV2;
14562 std::tie(LoV1, HiV1) = SplitVector(V1);
14563 std::tie(LoV2, HiV2) = SplitVector(V2);
14565 // Now create two 4-way blends of these half-width vectors.
14566 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14567 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14568 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14569 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14570 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14571 for (int i = 0; i < SplitNumElements; ++i) {
14572 int M = HalfMask[i];
14573 if (M >= NumElements) {
14574 if (M >= NumElements + SplitNumElements)
14575 UseHiV2 = true;
14576 else
14577 UseLoV2 = true;
14578 V2BlendMask[i] = M - NumElements;
14579 BlendMask[i] = SplitNumElements + i;
14580 } else if (M >= 0) {
14581 if (M >= SplitNumElements)
14582 UseHiV1 = true;
14583 else
14584 UseLoV1 = true;
14585 V1BlendMask[i] = M;
14586 BlendMask[i] = i;
14587 }
14588 }
14590 // Because the lowering happens after all combining takes place, we need to
14591 // manually combine these blend masks as much as possible so that we create
14592 // a minimal number of high-level vector shuffle nodes.
14594 // First try just blending the halves of V1 or V2.
14595 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14596 return DAG.getUNDEF(SplitVT);
14597 if (!UseLoV2 && !UseHiV2)
14598 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14599 if (!UseLoV1 && !UseHiV1)
14600 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14602 SDValue V1Blend, V2Blend;
14603 if (UseLoV1 && UseHiV1) {
14604 V1Blend =
14605 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14606 } else {
14607 // We only use half of V1 so map the usage down into the final blend mask.
14608 V1Blend = UseLoV1 ? LoV1 : HiV1;
14609 for (int i = 0; i < SplitNumElements; ++i)
14610 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14611 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14612 }
14613 if (UseLoV2 && UseHiV2) {
14614 V2Blend =
14615 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14616 } else {
14617 // We only use half of V2 so map the usage down into the final blend mask.
14618 V2Blend = UseLoV2 ? LoV2 : HiV2;
14619 for (int i = 0; i < SplitNumElements; ++i)
14620 if (BlendMask[i] >= SplitNumElements)
14621 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14622 }
14623 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14624 };
14625 SDValue Lo = HalfBlend(LoMask);
14626 SDValue Hi = HalfBlend(HiMask);
14627 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14628 }
14630 /// Either split a vector in halves or decompose the shuffles and the
14631 /// blend.
14633 /// This is provided as a good fallback for many lowerings of non-single-input
14634 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14635 /// between splitting the shuffle into 128-bit components and stitching those
14636 /// back together vs. extracting the single-input shuffles and blending those
14637 /// results.
14638 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14639 SDValue V2, ArrayRef<int> Mask,
14640 const X86Subtarget &Subtarget,
14641 SelectionDAG &DAG) {
14642 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14643 "shuffles as it could then recurse on itself.");
14644 int Size = Mask.size();
14646 // If this can be modeled as a broadcast of two elements followed by a blend,
14647 // prefer that lowering. This is especially important because broadcasts can
14648 // often fold with memory operands.
14649 auto DoBothBroadcast = [&] {
14650 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14651 for (int M : Mask)
14652 if (M >= Size) {
14653 if (V2BroadcastIdx < 0)
14654 V2BroadcastIdx = M - Size;
14655 else if (M - Size != V2BroadcastIdx)
14656 return false;
14657 } else if (M >= 0) {
14658 if (V1BroadcastIdx < 0)
14659 V1BroadcastIdx = M;
14660 else if (M != V1BroadcastIdx)
14661 return false;
14662 }
14663 return true;
14664 };
14665 if (DoBothBroadcast())
14666 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14667 Subtarget, DAG);
14669 // If the inputs all stem from a single 128-bit lane of each input, then we
14670 // split them rather than blending because the split will decompose to
14671 // unusually few instructions.
14672 int LaneCount = VT.getSizeInBits() / 128;
14673 int LaneSize = Size / LaneCount;
14674 SmallBitVector LaneInputs[2];
14675 LaneInputs[0].resize(LaneCount, false);
14676 LaneInputs[1].resize(LaneCount, false);
14677 for (int i = 0; i < Size; ++i)
14678 if (Mask[i] >= 0)
14679 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14680 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14681 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14683 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14684 // that the decomposed single-input shuffles don't end up here.
14685 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14686 DAG);
14687 }
14689 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14690 /// a lane permutation followed by a per-lane permutation.
14692 /// This is mainly for cases where we can have non-repeating permutes
14693 /// in each lane.
14695 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14696 /// we should investigate merging them.
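/// For illustration (hypothetical mask): the v4f64 mask <3,2,1,0> first
/// swaps the two 128-bit lanes (lane mask <2,3,0,1>) and then reverses the
/// elements within each lane (permute mask <1,0,3,2>).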
14697 static SDValue lowerShuffleAsLanePermuteAndPermute(
14698 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14699 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14700 int NumElts = VT.getVectorNumElements();
14701 int NumLanes = VT.getSizeInBits() / 128;
14702 int NumEltsPerLane = NumElts / NumLanes;
14704 SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14705 SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14707 for (int i = 0; i != NumElts; ++i) {
14708 int M = Mask[i];
14709 if (M < 0)
14710 continue;
14712 // Ensure that each lane comes from a single source lane.
14713 int SrcLane = M / NumEltsPerLane;
14714 int DstLane = i / NumEltsPerLane;
14715 if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14716 return SDValue();
14717 SrcLaneMask[DstLane] = SrcLane;
14719 PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14720 }
14722 // Make sure we set all elements of the lane mask, to avoid undef propagation.
14723 SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14724 for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14725 int SrcLane = SrcLaneMask[DstLane];
14726 if (0 <= SrcLane)
14727 for (int j = 0; j != NumEltsPerLane; ++j) {
14728 LaneMask[(DstLane * NumEltsPerLane) + j] =
14729 (SrcLane * NumEltsPerLane) + j;
14730 }
14731 }
14733 // If we're only shuffling a single lowest lane and the rest are identity
14734 // then don't bother.
14735 // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14736 int NumIdentityLanes = 0;
14737 bool OnlyShuffleLowestLane = true;
14738 for (int i = 0; i != NumLanes; ++i) {
14739 if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14740 i * NumEltsPerLane))
14741 NumIdentityLanes++;
14742 else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14743 OnlyShuffleLowestLane = false;
14744 }
14745 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14746 return SDValue();
14748 SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14749 return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14750 }
14752 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14753 /// source with a lane permutation.
14755 /// This lowering strategy results in four instructions in the worst case for a
14756 /// single-input cross lane shuffle which is lower than any other fully general
14757 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14758 /// shuffle pattern should be handled prior to trying this lowering.
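/// For illustration (hypothetical mask): the v8f32 mask <4,5,6,7,0,1,2,3>
/// flips the two 128-bit halves with one VPERM2F128-style shuffle, after
/// which the remaining shuffle of the flipped value is entirely in-lane.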
14759 static SDValue lowerShuffleAsLanePermuteAndShuffle(
14760 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14761 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14762 // FIXME: This should probably be generalized for 512-bit vectors as well.
14763 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14764 int Size = Mask.size();
14765 int LaneSize = Size / 2;
14767 // If there are only inputs from one 128-bit lane, splitting will in fact be
14768 // less expensive. The flags track whether the given lane contains an element
14769 // that crosses to another lane.
14770 if (!Subtarget.hasAVX2()) {
14771 bool LaneCrossing[2] = {false, false};
14772 for (int i = 0; i < Size; ++i)
14773 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
14774 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14775 if (!LaneCrossing[0] || !LaneCrossing[1])
14776 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14777 } else {
14778 bool LaneUsed[2] = {false, false};
14779 for (int i = 0; i < Size; ++i)
14780 if (Mask[i] >= 0)
14781 LaneUsed[(Mask[i] / LaneSize)] = true;
14782 if (!LaneUsed[0] || !LaneUsed[1])
14783 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14784 }
14786 // TODO - we could support shuffling V2 in the Flipped input.
14787 assert(V2.isUndef() &&
14788 "This last part of this routine only works on single input shuffles");
14790 SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
14791 for (int i = 0; i < Size; ++i) {
14792 int &M = InLaneMask[i];
14793 if (M < 0)
14794 continue;
14795 if (((M % Size) / LaneSize) != (i / LaneSize))
14796 M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
14797 }
14798 assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
14799 "In-lane shuffle mask expected");
14801 // Flip the lanes, and shuffle the results which should now be in-lane.
14802 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14803 SDValue Flipped = DAG.getBitcast(PVT, V1);
14804 Flipped =
14805 DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
14806 Flipped = DAG.getBitcast(VT, Flipped);
14807 return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
14808 }
14810 /// Handle lowering 2-lane 128-bit shuffles.
14811 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14812 SDValue V2, ArrayRef<int> Mask,
14813 const APInt &Zeroable,
14814 const X86Subtarget &Subtarget,
14815 SelectionDAG &DAG) {
14816 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14817 if (Subtarget.hasAVX2() && V2.isUndef())
14818 return SDValue();
14820 SmallVector<int, 4> WidenedMask;
14821 if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
14822 return SDValue();
14824 bool IsLowZero = (Zeroable & 0x3) == 0x3;
14825 bool IsHighZero = (Zeroable & 0xc) == 0xc;
14827 // Try to use an insert into a zero vector.
14828 if (WidenedMask[0] == 0 && IsHighZero) {
14829 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14830 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14831 DAG.getIntPtrConstant(0, DL));
14832 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14833 getZeroVector(VT, Subtarget, DAG, DL), LoV,
14834 DAG.getIntPtrConstant(0, DL));
14835 }
14837 // TODO: If minimizing size and one of the inputs is a zero vector and the
14838 // zero vector has only one use, we could use a VPERM2X128 to save the
14839 // instruction bytes needed to explicitly generate the zero vector.
14841 // Blends are faster and handle all the non-lane-crossing cases.
14842 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14843 Subtarget, DAG))
14844 return Blend;
14846 // If either input operand is a zero vector, use VPERM2X128 because its mask
14847 // allows us to replace the zero input with an implicit zero.
14848 if (!IsLowZero && !IsHighZero) {
14849 // Check for patterns which can be matched with a single insert of a 128-bit
14850 // subvector.
14851 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
14852 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
14854 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14855 // this will likely become vinsertf128 which can't fold a 256-bit memop.
14856 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14857 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14858 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14859 OnlyUsesV1 ? V1 : V2,
14860 DAG.getIntPtrConstant(0, DL));
14861 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14862 DAG.getIntPtrConstant(2, DL));
14863 }
14864 }
14866 // Try to use SHUF128 if possible.
14867 if (Subtarget.hasVLX()) {
14868 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14869 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14870 ((WidenedMask[1] % 2) << 1);
14871 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14872 DAG.getTargetConstant(PermMask, DL, MVT::i8));
14873 }
14874 }
14875 }
14877 // Otherwise form a 128-bit permutation. After accounting for undefs,
14878 // convert the 64-bit shuffle mask selection values into 128-bit
14879 // selection bits by dividing the indexes by 2 and shifting into positions
14880 // defined by a vperm2*128 instruction's immediate control byte.
14882 // The immediate permute control byte looks like this:
14883 // [1:0] - select 128 bits from sources for low half of destination
14884 // [2] - ignore
14885 // [3] - zero low half of destination
14886 // [5:4] - select 128 bits from sources for high half of destination
14887 // [6] - ignore
14888 // [7] - zero high half of destination
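// A worked example (illustrative): the v4f64 mask <2,3,6,7> widens to
// <1,3>, so PermMask = (1 << 0) | (3 << 4) = 0x31, i.e. VPERM2F128 with
// immediate 0x31 selects the high half of V1 and the high half of V2.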
14890 assert((WidenedMask[0] >= 0 || IsLowZero) &&
14891 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14893 unsigned PermMask = 0;
14894 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
14895 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
14897 // Check the immediate mask and replace unused sources with undef.
14898 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14899 V1 = DAG.getUNDEF(VT);
14900 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14901 V2 = DAG.getUNDEF(VT);
14903 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14904 DAG.getTargetConstant(PermMask, DL, MVT::i8));
14905 }
14907 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
14908 /// shuffling each lane.
14910 /// This attempts to create a repeated lane shuffle where each lane uses one
14911 /// or two of the lanes of the inputs. The lanes of the input vectors are
14912 /// shuffled in one or two independent shuffles to get the lanes into the
14913 /// position needed by the final shuffle.
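/// For illustration (hypothetical mask): the v8f32 mask <4,12,5,13,0,8,1,9>
/// swaps the 128-bit halves of both inputs (one lane shuffle each) and then
/// applies the repeated in-lane mask <0,8,1,9,4,12,5,13>, i.e. a plain
/// UNPCKLPS of the two fixed-up inputs.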
14914 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14915 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14916 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14917 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14919 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14920 return SDValue();
14922 int NumElts = Mask.size();
14923 int NumLanes = VT.getSizeInBits() / 128;
14924 int NumLaneElts = 128 / VT.getScalarSizeInBits();
14925 SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
14926 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14928 // First pass will try to fill in the RepeatMask from lanes that need two
14930 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14931 int Srcs[2] = {-1, -1};
14932 SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
14933 for (int i = 0; i != NumLaneElts; ++i) {
14934 int M = Mask[(Lane * NumLaneElts) + i];
14935 if (M < 0)
14936 continue;
14937 // Determine which of the possible input lanes (NumLanes from each source)
14938 // this element comes from. Assign that as one of the sources for this
14939 // lane. We can assign up to 2 sources for this lane. If we run out of
14940 // sources we can't do anything.
14941 int LaneSrc = M / NumLaneElts;
14942 int Src;
14943 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14944 Src = 0;
14945 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14946 Src = 1;
14947 else
14948 return SDValue();
14950 Srcs[Src] = LaneSrc;
14951 InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
14952 }
14954 // If this lane has two sources, see if it fits with the repeat mask so far.
14955 if (Srcs[1] < 0)
14956 continue;
14958 LaneSrcs[Lane][0] = Srcs[0];
14959 LaneSrcs[Lane][1] = Srcs[1];
14961 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14962 assert(M1.size() == M2.size() && "Unexpected mask size");
14963 for (int i = 0, e = M1.size(); i != e; ++i)
14964 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14965 return false;
14967 return true;
14968 };
14969 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14970 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14971 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14972 int M = Mask[i];
14973 if (M < 0)
14974 continue;
14975 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14976 "Unexpected mask element");
14981 if (MatchMasks(InLaneMask, RepeatMask)) {
14982 // Merge this lane mask into the final repeat mask.
14983 MergeMasks(InLaneMask, RepeatMask);
14984 continue;
14985 }
14987 // Didn't find a match. Swap the operands and try again.
14988 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
14989 ShuffleVectorSDNode::commuteMask(InLaneMask);
14991 if (MatchMasks(InLaneMask, RepeatMask)) {
14992 // Merge this lane mask into the final repeat mask.
14993 MergeMasks(InLaneMask, RepeatMask);
14994 continue;
14995 }
14997 // Couldn't find a match with the operands in either order.
14998 return SDValue();
14999 }
15001 // Now handle any lanes with only one source.
15002 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15003 // If this lane has already been processed, skip it.
15004 if (LaneSrcs[Lane][0] >= 0)
15005 continue;
15007 for (int i = 0; i != NumLaneElts; ++i) {
15008 int M = Mask[(Lane * NumLaneElts) + i];
15009 if (M < 0)
15010 continue;
15012 // If RepeatMask isn't defined yet we can define it ourself.
15013 if (RepeatMask[i] < 0)
15014 RepeatMask[i] = M % NumLaneElts;
15016 if (RepeatMask[i] < NumElts) {
15017 if (RepeatMask[i] != M % NumLaneElts)
15018 return SDValue();
15019 LaneSrcs[Lane][0] = M / NumLaneElts;
15020 } else {
15021 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15022 return SDValue();
15023 LaneSrcs[Lane][1] = M / NumLaneElts;
15024 }
15025 }
15027 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15028 return SDValue();
15029 }
15031 SmallVector<int, 16> NewMask(NumElts, -1);
15032 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15033 int Src = LaneSrcs[Lane][0];
15034 for (int i = 0; i != NumLaneElts; ++i) {
15035 int M = -1;
15036 if (Src >= 0)
15037 M = Src * NumLaneElts + i;
15038 NewMask[Lane * NumLaneElts + i] = M;
15039 }
15040 }
15041 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15042 // Ensure we didn't get back the shuffle we started with.
15043 // FIXME: This is a hack to make up for some splat handling code in
15044 // getVectorShuffle.
15045 if (isa<ShuffleVectorSDNode>(NewV1) &&
15046 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15047 return SDValue();
15049 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15050 int Src = LaneSrcs[Lane][1];
15051 for (int i = 0; i != NumLaneElts; ++i) {
15052 int M = -1;
15053 if (Src >= 0)
15054 M = Src * NumLaneElts + i;
15055 NewMask[Lane * NumLaneElts + i] = M;
15056 }
15057 }
15058 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15059 // Ensure we didn't get back the shuffle we started with.
15060 // FIXME: This is a hack to make up for some splat handling code in
15061 // getVectorShuffle.
15062 if (isa<ShuffleVectorSDNode>(NewV2) &&
15063 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15064 return SDValue();
15066 for (int i = 0; i != NumElts; ++i) {
15067 NewMask[i] = RepeatMask[i % NumLaneElts];
15068 if (NewMask[i] < 0)
15069 continue;
15071 NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15072 }
15073 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15074 }
15076 /// If the input shuffle mask results in a vector that is undefined in all upper
15077 /// or lower half elements and that mask accesses only 2 halves of the
15078 /// shuffle's operands, return true. A mask of half the width with mask indexes
15079 /// adjusted to access the extracted halves of the original shuffle operands is
15080 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
15081 /// lower half of each input operand is accessed.
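/// For illustration (hypothetical mask): the v8i16 mask <u,u,u,u,2,10,3,11>
/// has an undef lower half and reads only the low halves of both operands,
/// so this returns HalfIdx1 = 0, HalfIdx2 = 2 and HalfMask = <2,6,3,7>.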
15082 static bool
15083 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15084 int &HalfIdx1, int &HalfIdx2) {
15085 assert((Mask.size() == HalfMask.size() * 2) &&
15086 "Expected input mask to be twice as long as output");
15088 // Exactly one half of the result must be undef to allow narrowing.
15089 bool UndefLower = isUndefLowerHalf(Mask);
15090 bool UndefUpper = isUndefUpperHalf(Mask);
15091 if (UndefLower == UndefUpper)
15092 return false;
15094 unsigned HalfNumElts = HalfMask.size();
15095 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15096 HalfIdx1 = -1;
15097 HalfIdx2 = -1;
15098 for (unsigned i = 0; i != HalfNumElts; ++i) {
15099 int M = Mask[i + MaskIndexOffset];
15100 if (M < 0) {
15101 HalfMask[i] = M;
15102 continue;
15103 }
15105 // Determine which of the 4 half vectors this element is from.
15106 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15107 int HalfIdx = M / HalfNumElts;
15109 // Determine the element index into its half vector source.
15110 int HalfElt = M % HalfNumElts;
15112 // We can shuffle with up to 2 half vectors, set the new 'half'
15113 // shuffle mask accordingly.
15114 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15115 HalfMask[i] = HalfElt;
15116 HalfIdx1 = HalfIdx;
15117 continue;
15118 }
15119 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15120 HalfMask[i] = HalfElt + HalfNumElts;
15121 HalfIdx2 = HalfIdx;
15122 continue;
15123 }
15125 // Too many half vectors referenced.
15126 return false;
15127 }
15129 return true;
15130 }
15132 /// Given the output values from getHalfShuffleMask(), create a half width
15133 /// shuffle of extracted vectors followed by an insert back to full width.
15134 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15135 ArrayRef<int> HalfMask, int HalfIdx1,
15136 int HalfIdx2, bool UndefLower,
15137 SelectionDAG &DAG, bool UseConcat = false) {
15138 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15139 assert(V1.getValueType().isSimple() && "Expecting only simple types");
15141 MVT VT = V1.getSimpleValueType();
15142 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15143 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15145 auto getHalfVector = [&](int HalfIdx) {
15146 if (HalfIdx < 0)
15147 return DAG.getUNDEF(HalfVT);
15148 SDValue V = (HalfIdx < 2 ? V1 : V2);
15149 HalfIdx = (HalfIdx % 2) * HalfNumElts;
15150 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15151 DAG.getIntPtrConstant(HalfIdx, DL));
15152 };
15154 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15155 SDValue Half1 = getHalfVector(HalfIdx1);
15156 SDValue Half2 = getHalfVector(HalfIdx2);
15157 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15158 if (UseConcat) {
15159 SDValue Op0 = V;
15160 SDValue Op1 = DAG.getUNDEF(HalfVT);
15161 if (UndefLower)
15162 std::swap(Op0, Op1);
15163 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15164 }
15166 unsigned Offset = UndefLower ? HalfNumElts : 0;
15167 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15168 DAG.getIntPtrConstant(Offset, DL));
15169 }
15171 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15172 /// This allows for fast cases such as subvector extraction/insertion
15173 /// or shuffling smaller vector types which can lower more efficiently.
15174 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15175 SDValue V2, ArrayRef<int> Mask,
15176 const X86Subtarget &Subtarget,
15177 SelectionDAG &DAG) {
15178 assert((VT.is256BitVector() || VT.is512BitVector()) &&
15179 "Expected 256-bit or 512-bit vector");
15181 bool UndefLower = isUndefLowerHalf(Mask);
15182 if (!UndefLower && !isUndefUpperHalf(Mask))
15183 return SDValue();
15185 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15186 "Completely undef shuffle mask should have been simplified already");
15188 // Upper half is undef and lower half is whole upper subvector.
15189 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15190 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15191 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15192 if (!UndefLower &&
15193 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15194 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15195 DAG.getIntPtrConstant(HalfNumElts, DL));
15196 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15197 DAG.getIntPtrConstant(0, DL));
15198 }
15200 // Lower half is undef and upper half is whole lower subvector.
15201 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15202 if (UndefLower &&
15203 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15204 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15205 DAG.getIntPtrConstant(0, DL));
15206 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15207 DAG.getIntPtrConstant(HalfNumElts, DL));
15208 }
15210 int HalfIdx1, HalfIdx2;
15211 SmallVector<int, 8> HalfMask(HalfNumElts);
15212 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15213 return SDValue();
15215 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15217 // Only shuffle the halves of the inputs when useful.
15218 unsigned NumLowerHalves =
15219 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15220 unsigned NumUpperHalves =
15221 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15222 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15224 // Determine the larger pattern of undef/halves, then decide if it's worth
15225 // splitting the shuffle based on subtarget capabilities and types.
15226 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15227 if (!UndefLower) {
15228 // XXXXuuuu: no insert is needed.
15229 // Always extract lowers when setting lower - these are all free subreg ops.
15230 if (NumUpperHalves == 0)
15231 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15232 UndefLower, DAG);
15234 if (NumUpperHalves == 1) {
15235 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15236 if (Subtarget.hasAVX2()) {
15237 // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
15238 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15239 !is128BitUnpackShuffleMask(HalfMask) &&
15240 (!isSingleSHUFPSMask(HalfMask) ||
15241 Subtarget.hasFastVariableShuffle()))
15242 return SDValue();
15243 // If this is a unary shuffle (assume that the 2nd operand is
15244 // canonicalized to undef), then we can use vpermpd. Otherwise, we
15245 // are better off extracting the upper half of 1 operand and using a
15246 // narrow shuffle.
15247 if (EltWidth == 64 && V2.isUndef())
15248 return SDValue();
15249 }
15250 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15251 if (Subtarget.hasAVX512() && VT.is512BitVector())
15252 return SDValue();
15253 // Extract + narrow shuffle is better than the wide alternative.
15254 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15255 UndefLower, DAG);
15256 }
15258 // Don't extract both uppers, instead shuffle and then extract.
15259 assert(NumUpperHalves == 2 && "Half vector count went wrong");
15260 return SDValue();
15261 }
15263 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15264 if (NumUpperHalves == 0) {
15265 // AVX2 has efficient 64-bit element cross-lane shuffles.
15266 // TODO: Refine to account for unary shuffle, splat, and other masks?
15267 if (Subtarget.hasAVX2() && EltWidth == 64)
15268 return SDValue();
15269 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15270 if (Subtarget.hasAVX512() && VT.is512BitVector())
15271 return SDValue();
15272 // Narrow shuffle + insert is better than the wide alternative.
15273 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15274 UndefLower, DAG);
15275 }
15277 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15278 return SDValue();
15279 }
15281 /// Test whether the specified input (0 or 1) is in-place blended by the
15282 /// given mask.
15284 /// This returns true if the elements from a particular input are already in
15285 /// the slots required by the given mask and require no permutation.
15286 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
15287 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
15288 int Size = Mask.size();
15289 for (int i = 0; i < Size; ++i)
15290 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
15291 return false;
15293 return true;
15294 }
15296 /// Handle case where shuffle sources are coming from the same 128-bit lane and
15297 /// every lane can be represented as the same repeating mask - allowing us to
15298 /// shuffle the sources with the repeating shuffle and then permute the result
15299 /// to the destination lanes.
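/// For illustration (hypothetical mask): the v8f32 mask <0,1,4,5,2,3,6,7>
/// is already an identity within every 64-bit sub-lane, so it reduces to a
/// single 64-bit sub-lane permute <0,2,1,3> - effectively one VPERMPD on
/// AVX2.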
15300 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15301 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15302 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15303 int NumElts = VT.getVectorNumElements();
15304 int NumLanes = VT.getSizeInBits() / 128;
15305 int NumLaneElts = NumElts / NumLanes;
15307 // On AVX2 we may be able to just shuffle the lowest elements and then
15308 // broadcast the result.
15309 if (Subtarget.hasAVX2()) {
15310 for (unsigned BroadcastSize : {16, 32, 64}) {
15311 if (BroadcastSize <= VT.getScalarSizeInBits())
15312 continue;
15313 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15315 // Attempt to match a repeating pattern every NumBroadcastElts,
15316 // accounting for UNDEFs but only references the lowest 128-bit
15317 // lane of the inputs.
15318 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15319 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15320 for (int j = 0; j != NumBroadcastElts; ++j) {
15321 int M = Mask[i + j];
15322 if (M < 0)
15323 continue;
15324 int &R = RepeatMask[j];
15325 if (0 != ((M % NumElts) / NumLaneElts))
15326 return false;
15327 if (0 <= R && R != M)
15328 return false;
15329 R = M;
15330 }
15332 return true;
15333 };
15334 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15335 if (!FindRepeatingBroadcastMask(RepeatMask))
15336 continue;
15338 // Shuffle the (lowest) repeated elements in place for broadcast.
15339 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15341 // Shuffle the actual broadcast.
15342 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15343 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15344 for (int j = 0; j != NumBroadcastElts; ++j)
15345 BroadcastMask[i + j] = j;
15346 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15351 // Bail if the shuffle mask doesn't cross 128-bit lanes.
15352 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15355 // Bail if we already have a repeated lane shuffle mask.
15356 SmallVector<int, 8> RepeatedShuffleMask;
15357 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15360 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15361 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15362 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15363 int NumSubLanes = NumLanes * SubLaneScale;
15364 int NumSubLaneElts = NumLaneElts / SubLaneScale;
15366 // Check that all the sources are coming from the same lane and see if we can
15367 // form a repeating shuffle mask (local to each sub-lane). At the same time,
15368 // determine the source sub-lane for each destination sub-lane.
15369 int TopSrcSubLane = -1;
15370 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15371 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15372 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15373 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
15375 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15376 // Extract the sub-lane mask, check that it all comes from the same lane
15377 // and normalize the mask entries to come from the first lane.
15379 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15380 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15381 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15384 int Lane = (M % NumElts) / NumLaneElts;
15385 if ((0 <= SrcLane) && (SrcLane != Lane))
15388 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15389 SubLaneMask[Elt] = LocalM;
15392 // Whole sub-lane is UNDEF.
15396 // Attempt to match against the candidate repeated sub-lane masks.
15397 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15398 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15399 for (int i = 0; i != NumSubLaneElts; ++i) {
15400 if (M1[i] < 0 || M2[i] < 0)
15402 if (M1[i] != M2[i])
15408 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15409 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15412 // Merge the sub-lane mask into the matching repeated sub-lane mask.
15413 for (int i = 0; i != NumSubLaneElts; ++i) {
15414 int M = SubLaneMask[i];
15417 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15418 "Unexpected mask element");
15419 RepeatedSubLaneMask[i] = M;
// Track the topmost source sub-lane - by setting the remaining to UNDEF
15423 // we can greatly simplify shuffle matching.
15424 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15425 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15426 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15430 // Bail if we failed to find a matching repeated sub-lane mask.
15431 if (Dst2SrcSubLanes[DstSubLane] < 0)
15434 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15435 "Unexpected source lane");
15437 // Create a repeating shuffle mask for the entire vector.
15438 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15439 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15440 int Lane = SubLane / SubLaneScale;
15441 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15442 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15443 int M = RepeatedSubLaneMask[Elt];
15446 int Idx = (SubLane * NumSubLaneElts) + Elt;
15447 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15450 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15452 // Shuffle each source sub-lane to its destination.
15453 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15454 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15455 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15456 if (SrcSubLane < 0)
15458 for (int j = 0; j != NumSubLaneElts; ++j)
15459 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15462 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15466 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15467 bool &ForceV1Zero, bool &ForceV2Zero,
15468 unsigned &ShuffleImm, ArrayRef<int> Mask,
15469 const APInt &Zeroable) {
15470 int NumElts = VT.getVectorNumElements();
15471 assert(VT.getScalarSizeInBits() == 64 &&
15472 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15473 "Unexpected data type for VSHUFPD");
15474 assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15475 "Illegal shuffle mask");
15477 bool ZeroLane[2] = { true, true };
15478 for (int i = 0; i < NumElts; ++i)
15479 ZeroLane[i & 1] &= Zeroable[i];
15481 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
// Mask for V4F64: 0/1, 4/5, 2/3, 6/7..
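// Worked example (illustrative): for v4f64 with Mask = {0, 5, 2, 7}, element
// i must pick from {Val, Val+1} with Val = (i & 6) + NumElts * (i & 1), i.e.
// {0, 4, 2, 6}; the low bit of each chosen index forms the immediate, giving
// ShuffleImm = 0b1010.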
15484 bool ShufpdMask = true;
15485 bool CommutableMask = true;
15486 for (int i = 0; i < NumElts; ++i) {
15487 if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15491 int Val = (i & 6) + NumElts * (i & 1);
15492 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15493 if (Mask[i] < Val || Mask[i] > Val + 1)
15494 ShufpdMask = false;
15495 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15496 CommutableMask = false;
15497 ShuffleImm |= (Mask[i] % 2) << i;
15500 if (!ShufpdMask && !CommutableMask)
15503 if (!ShufpdMask && CommutableMask)
15506 ForceV1Zero = ZeroLane[0];
15507 ForceV2Zero = ZeroLane[1];
15511 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15512 SDValue V2, ArrayRef<int> Mask,
15513 const APInt &Zeroable,
15514 const X86Subtarget &Subtarget,
15515 SelectionDAG &DAG) {
15516 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15517 "Unexpected data type for VSHUFPD");
15519 unsigned Immediate = 0;
15520 bool ForceV1Zero = false, ForceV2Zero = false;
15521 if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15525 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15527 V1 = getZeroVector(VT, Subtarget, DAG, DL);
15529 V2 = getZeroVector(VT, Subtarget, DAG, DL);
15531 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15532 DAG.getTargetConstant(Immediate, DL, MVT::i8));
// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
// by zeroable elements in the remaining 24 elements. Turn this into two
// vpmovqb instructions shuffled together.
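// Worked example (illustrative): with the mask above, each v4i64 input is
// truncated to bytes with VTRUNC, the two 4-byte results are interleaved by
// the unpckldq pattern below, and the v16i8 result is widened back to v32i8
// with zeros.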
15538 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15539 SDValue V1, SDValue V2,
15540 ArrayRef<int> Mask,
15541 const APInt &Zeroable,
15542 SelectionDAG &DAG) {
15543 assert(VT == MVT::v32i8 && "Unexpected type!");
15545 // The first 8 indices should be every 8th element.
15546 if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15549 // Remaining elements need to be zeroable.
15550 if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
15553 V1 = DAG.getBitcast(MVT::v4i64, V1);
15554 V2 = DAG.getBitcast(MVT::v4i64, V2);
15556 V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15557 V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15559 // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15560 // the upper bits of the result using an unpckldq.
15561 SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15562 { 0, 1, 2, 3, 16, 17, 18, 19,
15563 4, 5, 6, 7, 20, 21, 22, 23 });
15564 // Insert the unpckldq into a zero vector to widen to v32i8.
15565 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15566 DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15567 DAG.getIntPtrConstant(0, DL));
15571 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15573 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15574 /// isn't available.
15575 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15576 const APInt &Zeroable, SDValue V1, SDValue V2,
15577 const X86Subtarget &Subtarget,
15578 SelectionDAG &DAG) {
15579 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15580 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15581 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15583 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15587 if (V2.isUndef()) {
15588 // Check for being able to broadcast a single element.
15589 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15590 Mask, Subtarget, DAG))
15593 // Use low duplicate instructions for masks that match their pattern.
15594 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15595 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15597 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15598 // Non-half-crossing single input shuffles can be lowered with an
15599 // interleaved permutation.
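// Worked example (illustrative): Mask = {1, 1, 2, 3} sets bit i when element
// i selects the odd element of its 128-bit pair, giving the immediate
// 0b1011 (0xB).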
15600 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15601 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15602 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15603 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15606 // With AVX2 we have direct support for this permutation.
15607 if (Subtarget.hasAVX2())
15608 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15609 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15611 // Try to create an in-lane repeating shuffle mask and then shuffle the
15612 // results into the target lanes.
15613 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15614 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15617 // Try to permute the lanes and then use a per-lane permute.
15618 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15619 Mask, DAG, Subtarget))
15622 // Otherwise, fall back.
15623 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15627 // Use dedicated unpack instructions for masks that match their pattern.
15628 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15631 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15632 Zeroable, Subtarget, DAG))
// Check if the blend happens to exactly fit the pattern of SHUFPD.
15636 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15637 Zeroable, Subtarget, DAG))
15640 // If we have one input in place, then we can permute the other input and
15641 // blend the result.
15642 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15643 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15646 // Try to create an in-lane repeating shuffle mask and then shuffle the
15647 // results into the target lanes.
15648 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15649 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15652 // Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input across lanes in a single
// instruction, so skip this pattern.
15656 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15657 isShuffleMaskInputInPlace(1, Mask))))
15658 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15659 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15662 // If we have VLX support, we can use VEXPAND.
15663 if (Subtarget.hasVLX())
15664 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
// If we have AVX2 then we always want to lower with a blend because at v4 we
15669 // can fully permute the elements.
15670 if (Subtarget.hasAVX2())
15671 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15674 // Otherwise fall back on generic lowering.
15675 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15679 /// Handle lowering of 4-lane 64-bit integer shuffles.
15681 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
15683 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15684 const APInt &Zeroable, SDValue V1, SDValue V2,
15685 const X86Subtarget &Subtarget,
15686 SelectionDAG &DAG) {
15687 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15688 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15689 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15690 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15692 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15696 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15697 Zeroable, Subtarget, DAG))
15700 // Check for being able to broadcast a single element.
15701 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15705 if (V2.isUndef()) {
// When the shuffle is mirrored between the 128-bit lanes of the vector, we
15707 // can use lower latency instructions that will operate on both lanes.
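// Worked example (illustrative): the v4i64 repeated mask {1, 0} scales to
// the v8i32 PSHUFD mask {2, 3, 0, 1}, swapping the two 64-bit halves within
// each 128-bit lane.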
15708 SmallVector<int, 2> RepeatedMask;
15709 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15710 SmallVector<int, 4> PSHUFDMask;
15711 scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15712 return DAG.getBitcast(
15714 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15715 DAG.getBitcast(MVT::v8i32, V1),
15716 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
// AVX2 provides a direct instruction for permuting a single input across
// lanes.
15721 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15722 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15725 // Try to use shift instructions.
15726 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15727 Zeroable, Subtarget, DAG))
15730 // If we have VLX support, we can use VALIGN or VEXPAND.
15731 if (Subtarget.hasVLX()) {
15732 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15736 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15741 // Try to use PALIGNR.
15742 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15746 // Use dedicated unpack instructions for masks that match their pattern.
15747 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15750 // If we have one input in place, then we can permute the other input and
15751 // blend the result.
15752 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15753 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15756 // Try to create an in-lane repeating shuffle mask and then shuffle the
15757 // results into the target lanes.
15758 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15759 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15762 // Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input across lanes in a single
// instruction, so skip this pattern.
15766 if (!isShuffleMaskInputInPlace(0, Mask) &&
15767 !isShuffleMaskInputInPlace(1, Mask))
15768 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15769 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15772 // Otherwise fall back on generic blend lowering.
15773 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15777 /// Handle lowering of 8-lane 32-bit floating point shuffles.
15779 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15780 /// isn't available.
15781 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15782 const APInt &Zeroable, SDValue V1, SDValue V2,
15783 const X86Subtarget &Subtarget,
15784 SelectionDAG &DAG) {
15785 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15786 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15787 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15789 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15790 Zeroable, Subtarget, DAG))
15793 // Check for being able to broadcast a single element.
15794 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15798 // If the shuffle mask is repeated in each 128-bit lane, we have many more
15799 // options to efficiently lower the shuffle.
15800 SmallVector<int, 4> RepeatedMask;
15801 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15802 assert(RepeatedMask.size() == 4 &&
15803 "Repeated masks must be half the mask width!");
15805 // Use even/odd duplicate instructions for masks that match their pattern.
15806 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
15807 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15808 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
15809 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15812 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15813 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15815 // Use dedicated unpack instructions for masks that match their pattern.
15816 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15819 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15820 // have already handled any direct blends.
15821 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15824 // Try to create an in-lane repeating shuffle mask and then shuffle the
15825 // results into the target lanes.
15826 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15827 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15830 // If we have a single input shuffle with different shuffle patterns in the
// two 128-bit lanes, use a variable mask with VPERMILPS.
15832 if (V2.isUndef()) {
15833 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15834 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
15835 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15837 if (Subtarget.hasAVX2())
15838 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
15840 // Otherwise, fall back.
15841 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
15847 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15848 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15851 // If we have VLX support, we can use VEXPAND.
15852 if (Subtarget.hasVLX())
15853 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
// For non-AVX512 targets, if the mask consists of 16-bit elements within each
// lane, try to split, since after the split we get more efficient code using
// vpunpcklwd and vpunpckhwd than with vblend.
15860 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
15861 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15865 // If we have AVX2 then we always want to lower with a blend because at v8 we
15866 // can fully permute the elements.
15867 if (Subtarget.hasAVX2())
15868 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
15871 // Otherwise fall back on generic lowering.
15872 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15876 /// Handle lowering of 8-lane 32-bit integer shuffles.
15878 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
15880 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15881 const APInt &Zeroable, SDValue V1, SDValue V2,
15882 const X86Subtarget &Subtarget,
15883 SelectionDAG &DAG) {
15884 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15885 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15886 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15887 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
15889 // Whenever we can lower this as a zext, that instruction is strictly faster
15890 // than any alternative. It also allows us to fold memory operands into the
15891 // shuffle in many cases.
15892 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15893 Zeroable, Subtarget, DAG))
// For non-AVX512 targets, if the mask consists of 16-bit elements within each
// lane, try to split, since after the split we get more efficient code using
// vpunpcklwd and vpunpckhwd than with vblend.
15899 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
15900 !Subtarget.hasAVX512())
15901 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
15905 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
15906 Zeroable, Subtarget, DAG))
15909 // Check for being able to broadcast a single element.
15910 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
15914 // If the shuffle mask is repeated in each 128-bit lane we can use more
// efficient instructions that mirror the shuffles across the two 128-bit
// lanes.
15917 SmallVector<int, 4> RepeatedMask;
15918 bool Is128BitLaneRepeatedShuffle =
15919 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
15920 if (Is128BitLaneRepeatedShuffle) {
15921 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
15923 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
15924 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15926 // Use dedicated unpack instructions for masks that match their pattern.
15927 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
15931 // Try to use shift instructions.
15932 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
15933 Zeroable, Subtarget, DAG))
// If we have VLX support, we can use VALIGN or VEXPAND.
15937 if (Subtarget.hasVLX()) {
15938 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
15942 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
15947 // Try to use byte rotation instructions.
15948 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
15952 // Try to create an in-lane repeating shuffle mask and then shuffle the
15953 // results into the target lanes.
15954 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15955 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15958 // If the shuffle patterns aren't repeated but it is a single input, directly
15959 // generate a cross-lane VPERMD instruction.
15960 if (V2.isUndef()) {
15961 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15962 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
15965 // Assume that a single SHUFPS is faster than an alternative sequence of
15966 // multiple instructions (even if the CPU has a domain penalty).
15967 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
15968 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
15969 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
15970 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
15971 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
15972 CastV1, CastV2, DAG);
15973 return DAG.getBitcast(MVT::v8i32, ShufPS);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
15978 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15979 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15982 // Otherwise fall back on generic blend lowering.
15983 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
15987 /// Handle lowering of 16-lane 16-bit integer shuffles.
15989 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
15991 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15992 const APInt &Zeroable, SDValue V1, SDValue V2,
15993 const X86Subtarget &Subtarget,
15994 SelectionDAG &DAG) {
15995 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15996 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15997 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
15998 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16000 // Whenever we can lower this as a zext, that instruction is strictly faster
16001 // than any alternative. It also allows us to fold memory operands into the
16002 // shuffle in many cases.
16003 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16004 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16007 // Check for being able to broadcast a single element.
16008 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16012 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16013 Zeroable, Subtarget, DAG))
16016 // Use dedicated unpack instructions for masks that match their pattern.
16017 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16020 // Use dedicated pack instructions for masks that match their pattern.
16021 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16025 // Try to use shift instructions.
16026 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
16027 Zeroable, Subtarget, DAG))
16030 // Try to use byte rotation instructions.
16031 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16035 // Try to create an in-lane repeating shuffle mask and then shuffle the
16036 // results into the target lanes.
16037 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16038 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16041 if (V2.isUndef()) {
// There are no generalized cross-lane shuffle operations available on i16
// element types.
16044 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16045 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16046 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16049 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16053 SmallVector<int, 8> RepeatedMask;
16054 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16055 // As this is a single-input shuffle, the repeated mask should be
16056 // a strictly valid v8i16 mask that we can pass through to the v8i16
16057 // lowering to handle even the v16 case.
16058 return lowerV8I16GeneralSingleInputShuffle(
16059 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16063 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16064 Zeroable, Subtarget, DAG))
16067 // AVX512BWVL can lower to VPERMW.
16068 if (Subtarget.hasBWI() && Subtarget.hasVLX())
16069 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
16073 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16074 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16077 // Try to permute the lanes and then use a per-lane permute.
16078 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16079 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16082 // Otherwise fall back on generic lowering.
16083 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16087 /// Handle lowering of 32-lane 8-bit integer shuffles.
16089 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
16091 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16092 const APInt &Zeroable, SDValue V1, SDValue V2,
16093 const X86Subtarget &Subtarget,
16094 SelectionDAG &DAG) {
16095 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16096 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16097 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16098 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16100 // Whenever we can lower this as a zext, that instruction is strictly faster
16101 // than any alternative. It also allows us to fold memory operands into the
16102 // shuffle in many cases.
16103 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16104 Zeroable, Subtarget, DAG))
16107 // Check for being able to broadcast a single element.
16108 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16112 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16113 Zeroable, Subtarget, DAG))
16116 // Use dedicated unpack instructions for masks that match their pattern.
16117 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16120 // Use dedicated pack instructions for masks that match their pattern.
16121 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16125 // Try to use shift instructions.
16126 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
16127 Zeroable, Subtarget, DAG))
16130 // Try to use byte rotation instructions.
16131 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16135 // Try to create an in-lane repeating shuffle mask and then shuffle the
16136 // results into the target lanes.
16137 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16138 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
// There are no generalized cross-lane shuffle operations available on i8
// element types.
16143 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16144 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16145 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16148 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16152 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16153 Zeroable, Subtarget, DAG))
16156 // AVX512VBMIVL can lower to VPERMB.
16157 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
16158 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
16162 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16163 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16166 // Try to permute the lanes and then use a per-lane permute.
16167 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16168 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
// by zeroable elements in the remaining 24 elements. Turn this into two
// vpmovqb instructions shuffled together.
16174 if (Subtarget.hasVLX())
16175 if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16176 Mask, Zeroable, DAG))
16179 // Otherwise fall back on generic lowering.
16180 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16184 /// High-level routine to lower various 256-bit x86 vector shuffles.
16186 /// This routine either breaks down the specific type of a 256-bit x86 vector
16187 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
16188 /// together based on the available instructions.
16189 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16190 SDValue V1, SDValue V2, const APInt &Zeroable,
16191 const X86Subtarget &Subtarget,
16192 SelectionDAG &DAG) {
16193 // If we have a single input to the zero element, insert that into V1 if we
16194 // can do so cheaply.
16195 int NumElts = VT.getVectorNumElements();
16196 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16198 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16199 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16200 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16203 // Handle special cases where the lower or upper half is UNDEF.
16205 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16208 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16209 // can check for those subtargets here and avoid much of the subtarget
16210 // querying in the per-vector-type lowering routines. With AVX1 we have
16211 // essentially *zero* ability to manipulate a 256-bit vector with integer
16212 // types. Since we'll use floating point types there eventually, just
16213 // immediately cast everything to a float and operate entirely in that domain.
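// For example (illustrative): on AVX1 a v4i64 shuffle is bitcast to v4f64,
// shuffled in the floating-point domain, and bitcast back to v4i64.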
16214 if (VT.isInteger() && !Subtarget.hasAVX2()) {
16215 int ElementBits = VT.getScalarSizeInBits();
16216 if (ElementBits < 32) {
// No floating-point type is available; if we can't use the bit operations
// for masking/blending, then decompose into 128-bit vectors.
16219 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16222 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16224 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16227 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16228 VT.getVectorNumElements());
16229 V1 = DAG.getBitcast(FpVT, V1);
16230 V2 = DAG.getBitcast(FpVT, V2);
16231 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16234 switch (VT.SimpleTy) {
16236 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16238 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16240 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16242 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16244 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16246 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16249 llvm_unreachable("Not a valid 256-bit x86 vector type!");
/// Try to lower a vector shuffle as a series of 128-bit shuffles.
16254 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16255 const APInt &Zeroable, SDValue V1, SDValue V2,
16256 const X86Subtarget &Subtarget,
16257 SelectionDAG &DAG) {
16258 assert(VT.getScalarSizeInBits() == 64 &&
16259 "Unexpected element type size for 128bit shuffle.");
// Handling a 256-bit vector requires VLX; lowerV2X128VectorShuffle() is most
// probably the better solution for that case.
16263 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16265 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16266 SmallVector<int, 4> WidenedMask;
16267 if (!canWidenShuffleElements(Mask, WidenedMask))
16270 // Try to use an insert into a zero vector.
16271 if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16272 (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16273 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16274 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16275 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16276 DAG.getIntPtrConstant(0, DL));
16277 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16278 getZeroVector(VT, Subtarget, DAG, DL), LoV,
16279 DAG.getIntPtrConstant(0, DL));
// Check for patterns which can be matched with a single insert of a 256-bit
// subvector.
16284 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
16285 {0, 1, 2, 3, 0, 1, 2, 3});
16286 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
16287 {0, 1, 2, 3, 8, 9, 10, 11})) {
16288 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16289 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
16290 OnlyUsesV1 ? V1 : V2,
16291 DAG.getIntPtrConstant(0, DL));
16292 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16293 DAG.getIntPtrConstant(4, DL));
16296 assert(WidenedMask.size() == 4);
16298 // See if this is an insertion of the lower 128-bits of V2 into V1.
16299 bool IsInsert = true;
16301 for (int i = 0; i < 4; ++i) {
16302 assert(WidenedMask[i] >= -1);
16303 if (WidenedMask[i] < 0)
16306 // Make sure all V1 subvectors are in place.
16307 if (WidenedMask[i] < 4) {
16308 if (WidenedMask[i] != i) {
// Make sure we only have a single V2 index and it's the lowest 128 bits.
16314 if (V2Index >= 0 || WidenedMask[i] != 4) {
16321 if (IsInsert && V2Index >= 0) {
16322 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16323 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16324 DAG.getIntPtrConstant(0, DL));
16325 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16328 // Try to lower to vshuf64x2/vshuf32x4.
16329 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16330 unsigned PermMask = 0;
// Ensure all elements came from the same Op.
16332 for (int i = 0; i < 4; ++i) {
16333 assert(WidenedMask[i] >= -1);
16334 if (WidenedMask[i] < 0)
16337 SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
16338 unsigned OpIndex = i / 2;
16339 if (Ops[OpIndex].isUndef())
16341 else if (Ops[OpIndex] != Op)
16344 // Convert the 128-bit shuffle mask selection values into 128-bit selection
16345 // bits defined by a vshuf64x2 instruction's immediate control byte.
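// Worked example (illustrative): WidenedMask = {0, 1, 4, 7} takes 128-bit
// chunks 0 and 1 from V1 and chunks 0 and 3 from V2, giving
// PermMask = 0 | (1 << 2) | (0 << 4) | (3 << 6) = 0xC4.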
16346 PermMask |= (WidenedMask[i] % 4) << (i * 2);
16349 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16350 DAG.getTargetConstant(PermMask, DL, MVT::i8));
16353 /// Handle lowering of 8-lane 64-bit floating point shuffles.
16354 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16355 const APInt &Zeroable, SDValue V1, SDValue V2,
16356 const X86Subtarget &Subtarget,
16357 SelectionDAG &DAG) {
16358 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16359 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16360 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16362 if (V2.isUndef()) {
16363 // Use low duplicate instructions for masks that match their pattern.
16364 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
16365 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16367 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16368 // Non-half-crossing single input shuffles can be lowered with an
16369 // interleaved permutation.
16370 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16371 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16372 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16373 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16374 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16375 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16378 SmallVector<int, 4> RepeatedMask;
16379 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16380 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16381 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16384 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16385 V2, Subtarget, DAG))
16388 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
// Check if the blend happens to exactly fit the pattern of SHUFPD.
16392 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16393 Zeroable, Subtarget, DAG))
16396 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16400 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16401 Zeroable, Subtarget, DAG))
16404 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16407 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16408 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16409 const APInt &Zeroable, SDValue V1, SDValue V2,
16410 const X86Subtarget &Subtarget,
16411 SelectionDAG &DAG) {
16412 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16413 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16414 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16416 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16417 // options to efficiently lower the shuffle.
16418 SmallVector<int, 4> RepeatedMask;
16419 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16420 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16422 // Use even/odd duplicate instructions for masks that match their pattern.
16423 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16424 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16425 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16426 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16429 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16430 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16432 // Use dedicated unpack instructions for masks that match their pattern.
16433 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16436 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16437 Zeroable, Subtarget, DAG))
16440 // Otherwise, fall back to a SHUFPS sequence.
16441 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16444 // If we have a single input shuffle with different shuffle patterns in the
// 128-bit lanes and doesn't cross lanes, use a variable-mask VPERMILPS.
16446 if (V2.isUndef() &&
16447 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16448 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16449 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16452 // If we have AVX512F support, we can use VEXPAND.
16453 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16454 V1, V2, DAG, Subtarget))
16457 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16460 /// Handle lowering of 8-lane 64-bit integer shuffles.
16461 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16462 const APInt &Zeroable, SDValue V1, SDValue V2,
16463 const X86Subtarget &Subtarget,
16464 SelectionDAG &DAG) {
16465 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16466 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16467 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16469 if (V2.isUndef()) {
// When the shuffle is mirrored between the 128-bit lanes of the vector, we
// can use lower latency instructions that will operate on all four
// 128-bit lanes.
16473 SmallVector<int, 2> Repeated128Mask;
16474 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16475 SmallVector<int, 4> PSHUFDMask;
16476 scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16477 return DAG.getBitcast(
16479 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16480 DAG.getBitcast(MVT::v16i32, V1),
16481 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16484 SmallVector<int, 4> Repeated256Mask;
16485 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16486 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16487 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16490 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16491 V2, Subtarget, DAG))
16494 // Try to use shift instructions.
16495 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16496 Zeroable, Subtarget, DAG))
16499 // Try to use VALIGN.
16500 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16504 // Try to use PALIGNR.
16505 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16509 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16511 // If we have AVX512F support, we can use VEXPAND.
16512 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16516 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16517 Zeroable, Subtarget, DAG))
16520 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16523 /// Handle lowering of 16-lane 32-bit integer shuffles.
16524 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16525 const APInt &Zeroable, SDValue V1, SDValue V2,
16526 const X86Subtarget &Subtarget,
16527 SelectionDAG &DAG) {
16528 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16529 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16530 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16532 // Whenever we can lower this as a zext, that instruction is strictly faster
16533 // than any alternative. It also allows us to fold memory operands into the
16534 // shuffle in many cases.
16535 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16536 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16539 // If the shuffle mask is repeated in each 128-bit lane we can use more
// efficient instructions that mirror the shuffles across the four 128-bit
// lanes.
16542 SmallVector<int, 4> RepeatedMask;
16543 bool Is128BitLaneRepeatedShuffle =
16544 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16545 if (Is128BitLaneRepeatedShuffle) {
16546 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16548 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16549 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16551 // Use dedicated unpack instructions for masks that match their pattern.
16552 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16556 // Try to use shift instructions.
16557 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16558 Zeroable, Subtarget, DAG))
16561 // Try to use VALIGN.
16562 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
16566 // Try to use byte rotation instructions.
16567 if (Subtarget.hasBWI())
16568 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16572 // Assume that a single SHUFPS is faster than using a permv shuffle.
16573 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16574 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16575 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16576 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16577 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16578 CastV1, CastV2, DAG);
16579 return DAG.getBitcast(MVT::v16i32, ShufPS);
16581 // If we have AVX512F support, we can use VEXPAND.
16582 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16586 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16587 Zeroable, Subtarget, DAG))
16589 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16592 /// Handle lowering of 32-lane 16-bit integer shuffles.
16593 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16594 const APInt &Zeroable, SDValue V1, SDValue V2,
16595 const X86Subtarget &Subtarget,
16596 SelectionDAG &DAG) {
16597 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16598 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16599 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16600 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16602 // Whenever we can lower this as a zext, that instruction is strictly faster
16603 // than any alternative. It also allows us to fold memory operands into the
16604 // shuffle in many cases.
16605 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16606 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16609 // Use dedicated unpack instructions for masks that match their pattern.
16610 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16613 // Try to use shift instructions.
16614 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16615 Zeroable, Subtarget, DAG))
16618 // Try to use byte rotation instructions.
16619 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16623 if (V2.isUndef()) {
16624 SmallVector<int, 8> RepeatedMask;
16625 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16626 // As this is a single-input shuffle, the repeated mask should be
16627 // a strictly valid v8i16 mask that we can pass through to the v8i16
16628 // lowering to handle even the v32 case.
16629 return lowerV8I16GeneralSingleInputShuffle(
16630 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16634 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16635 Zeroable, Subtarget, DAG))
16638 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16639 Zeroable, Subtarget, DAG))
16642 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16645 /// Handle lowering of 64-lane 8-bit integer shuffles.
16646 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16647 const APInt &Zeroable, SDValue V1, SDValue V2,
16648 const X86Subtarget &Subtarget,
16649 SelectionDAG &DAG) {
16650 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16651 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16652 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16653 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16655 // Whenever we can lower this as a zext, that instruction is strictly faster
16656 // than any alternative. It also allows us to fold memory operands into the
16657 // shuffle in many cases.
16658 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16659 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16662 // Use dedicated unpack instructions for masks that match their pattern.
16663 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16666 // Use dedicated pack instructions for masks that match their pattern.
16667 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16671 // Try to use shift instructions.
16672 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16673 Zeroable, Subtarget, DAG))
16676 // Try to use byte rotation instructions.
16677 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16681 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16682 Zeroable, Subtarget, DAG))
16685 // VBMI can use VPERMV/VPERMV3 byte shuffles.
16686 if (Subtarget.hasVBMI())
16687 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16689 // Try to create an in-lane repeating shuffle mask and then shuffle the
16690 // results into the target lanes.
16691 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16692 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16695 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16696 Zeroable, Subtarget, DAG))
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
16702 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16703 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16706 // FIXME: Implement direct support for this type!
16707 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16710 /// High-level routine to lower various 512-bit x86 vector shuffles.
16712 /// This routine either breaks down the specific type of a 512-bit x86 vector
16713 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
16714 /// together based on the available instructions.
16715 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16716 MVT VT, SDValue V1, SDValue V2,
16717 const APInt &Zeroable,
16718 const X86Subtarget &Subtarget,
16719 SelectionDAG &DAG) {
16720 assert(Subtarget.hasAVX512() &&
16721 "Cannot lower 512-bit vectors w/ basic ISA!");
16723 // If we have a single input to the zero element, insert that into V1 if we
16724 // can do so cheaply.
16725 int NumElts = Mask.size();
16726 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16728 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16729 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16730 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16733 // Handle special cases where the lower or upper half is UNDEF.
16735 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16738 // Check for being able to broadcast a single element.
16739 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16743 // Dispatch to each element type for lowering. If we don't have support for
16744 // specific element type shuffles at 512 bits, immediately split them and
16745 // lower them. Each lowering routine of a given type is allowed to assume that
16746 // the requisite ISA extensions for that element type are available.
16747 switch (VT.SimpleTy) {
16749 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16751 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16753 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16755 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16757 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16759 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16762 llvm_unreachable("Not a valid 512-bit x86 vector type!");
16766 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
16767 MVT VT, SDValue V1, SDValue V2,
16768 const X86Subtarget &Subtarget,
16769 SelectionDAG &DAG) {
16770 // Shuffle should be unary.
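// Worked example (illustrative): the v8i1 mask {4, 5, 6, 7, -1, -1, -1, -1}
// has M - i == 4 for every defined element, so it lowers to a KSHIFTR by 4
// (widened to v16i1 first when DQI is unavailable).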
16775 int NumElts = Mask.size();
16776 for (int i = 0; i != NumElts; ++i) {
16778 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
16779 "Unexpected mask index.");
16783 // The first non-undef element determines our shift amount.
16784 if (ShiftAmt < 0) {
16786 // Need to be shifting right.
16790 // All non-undef elements must shift by the same amount.
16791 if (ShiftAmt != M - i)
16794 assert(ShiftAmt >= 0 && "All undef?");
// Great, we found a shift right.
16798 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
16799 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
16800 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
16801 DAG.getUNDEF(WideVT), V1,
16802 DAG.getIntPtrConstant(0, DL));
16803 Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
16804 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
16805 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
16806 DAG.getIntPtrConstant(0, DL));
16809 // Determine if this shuffle can be implemented with a KSHIFT instruction.
16810 // Returns the shift amount if possible or -1 if not. This is a simplified
16811 // version of matchShuffleAsShift.
16812 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
16813 int MaskOffset, const APInt &Zeroable) {
16814 int Size = Mask.size();
16816 auto CheckZeros = [&](int Shift, bool Left) {
16817 for (int j = 0; j < Shift; ++j)
16818 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
16824 auto MatchShift = [&](int Shift, bool Left) {
16825 unsigned Pos = Left ? Shift : 0;
16826 unsigned Low = Left ? 0 : Shift;
16827 unsigned Len = Size - Shift;
16828 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
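// Worked example (illustrative): with Size == 4, a mask whose element 0 is
// zeroable and whose elements 1..3 are {0, 1, 2} (plus MaskOffset) matches
// KSHIFTL with a shift amount of 1.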
16831 for (int Shift = 1; Shift != Size; ++Shift)
16832 for (bool Left : {true, false})
16833 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
16834 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
16842 // Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle, and then truncate it back.
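// For example (illustrative): a v8i1 shuffle is sign-extended to v8i32 (with
// VLX) or v8i64, shuffled there, and then converted back to a mask with a
// signed compare against zero or a truncate.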
16846 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16847 MVT VT, SDValue V1, SDValue V2,
16848 const APInt &Zeroable,
16849 const X86Subtarget &Subtarget,
16850 SelectionDAG &DAG) {
16851 assert(Subtarget.hasAVX512() &&
16852 "Cannot lower 512-bit vectors w/o basic ISA!");
16854 int NumElts = Mask.size();
16856 // Try to recognize shuffles that are just padding a subvector with zeros.
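// Worked example (illustrative): a v8i1 mask {0, 1, 2, 3, Z, Z, Z, Z}, where
// the upper four elements are zeroable, extracts the low v4i1 subvector and
// inserts it into a zero vector.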
16857 int SubvecElts = 0;
16859 for (int i = 0; i != NumElts; ++i) {
16860 if (Mask[i] >= 0) {
// Grab the source from the first valid mask element. All subsequent elements
// need to use this same source.
16864 Src = Mask[i] / NumElts;
16865 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
16871 assert(SubvecElts != NumElts && "Identity shuffle?");
// Clip to a power of 2.
16874 SubvecElts = PowerOf2Floor(SubvecElts);
16876 // Make sure the number of zeroable bits in the top at least covers the bits
16877 // not covered by the subvector.
16878 if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
16879 assert(Src >= 0 && "Expected a source!");
16880 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
16881 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
16882 Src == 0 ? V1 : V2,
16883 DAG.getIntPtrConstant(0, DL));
16884 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16885 DAG.getConstant(0, DL, VT),
16886 Extract, DAG.getIntPtrConstant(0, DL));
16889 // Try a simple shift right with undef elements. Later we'll try with zeros.
16890 if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
16891 DAG))
16892 return Shift;
16894 // Try to match KSHIFTs.
16895 unsigned Offset = 0;
16896 for (SDValue V : { V1, V2 }) {
16897 unsigned Opcode;
16898 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
16899 if (ShiftAmt >= 0) {
16900 MVT WideVT = VT;
16901 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
16902 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
16903 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
16904 DAG.getUNDEF(WideVT), V,
16905 DAG.getIntPtrConstant(0, DL));
16906 // Widened right shifts need two shifts to ensure we shift in zeroes.
16907 if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
16908 int WideElts = WideVT.getVectorNumElements();
16909 // Shift left to put the original vector in the MSBs of the new size.
16910 Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
16911 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
16912 // Increase the shift amount to account for the left shift.
16913 ShiftAmt += WideElts - NumElts;
16916 Res = DAG.getNode(Opcode, DL, WideVT, Res,
16917 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
16918 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
16919 DAG.getIntPtrConstant(0, DL));
16920 }
16921 Offset += NumElts; // Increment for next iteration.
16922 }
16926 MVT ExtVT;
16927 switch (VT.SimpleTy) {
16928 default:
16929 llvm_unreachable("Expected a vector of i1 elements");
16930 case MVT::v2i1:
16931 ExtVT = MVT::v2i64;
16932 break;
16933 case MVT::v4i1:
16934 ExtVT = MVT::v4i32;
16935 break;
16936 case MVT::v8i1:
16937 // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
16938 // vector.
16939 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
16940 break;
16941 case MVT::v16i1:
16942 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16943 // 256-bit operation available.
16944 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
16945 break;
16946 case MVT::v32i1:
16947 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16948 // 256-bit operation available.
16949 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
16950 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
16951 break;
16952 case MVT::v64i1:
16953 ExtVT = MVT::v64i8;
16954 break;
16955 }
16957 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
16958 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
16960 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
16961 // The i1 lanes were sign-extended, so a signed compare against zero recovers the mask.
16962 int NumElems = VT.getVectorNumElements();
16963 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
16964 (Subtarget.hasDQI() && (NumElems < 32)))
16965 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
16966 Shuffle, ISD::SETGT);
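// Illustrative note (added): with BWI (32+ elements) or DQI (< 32 elements)
// this compare typically selects to a sign-bit-to-mask move in the
// VPMOVB2M/VPMOVW2M/VPMOVD2M family, which is cheaper than a full truncate.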
16968 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
16971 /// Helper function that returns true if the shuffle mask should be
16972 /// commuted to improve canonicalization.
16973 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
16974 int NumElements = Mask.size();
16976 int NumV1Elements = 0, NumV2Elements = 0;
16977 for (int M : Mask)
16978 if (M < 0)
16979 continue;
16980 else if (M < NumElements)
16981 ++NumV1Elements;
16982 else
16983 ++NumV2Elements;
16985 // Commute the shuffle as needed such that more elements come from V1 than
16986 // V2. This allows us to match the shuffle pattern strictly on how many
16987 // elements come from V1 without handling the symmetric cases.
16988 if (NumV2Elements > NumV1Elements)
16989 return true;
16991 assert(NumV1Elements > 0 && "No V1 indices");
16993 if (NumV2Elements == 0)
16994 return false;
16996 // When the number of V1 and V2 elements are the same, try to minimize the
16997 // number of uses of V2 in the low half of the vector. When that is tied,
16998 // ensure that the sum of indices for V1 is equal to or lower than the sum
16999 // of indices for V2. When those are equal, try to ensure that the number of odd
17000 // indices for V1 is lower than the number of odd indices for V2.
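// Illustrative note (added): these tie-breakers are heuristics; pushing V2
// lanes toward the high half and odd positions tends to line masks up with
// the blend/unpack patterns, which are written in terms of V1.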
17001 if (NumV1Elements == NumV2Elements) {
17002 int LowV1Elements = 0, LowV2Elements = 0;
17003 for (int M : Mask.slice(0, NumElements / 2))
17004 if (M >= NumElements)
17005 ++LowV2Elements;
17006 else if (M >= 0)
17007 ++LowV1Elements;
17008 if (LowV2Elements > LowV1Elements)
17009 return true;
17010 if (LowV2Elements == LowV1Elements) {
17011 int SumV1Indices = 0, SumV2Indices = 0;
17012 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17013 if (Mask[i] >= NumElements)
17014 SumV2Indices += i;
17015 else if (Mask[i] >= 0)
17016 SumV1Indices += i;
17017 if (SumV2Indices < SumV1Indices)
17018 return true;
17019 if (SumV2Indices == SumV1Indices) {
17020 int NumV1OddIndices = 0, NumV2OddIndices = 0;
17021 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17022 if (Mask[i] >= NumElements)
17023 NumV2OddIndices += i % 2;
17024 else if (Mask[i] >= 0)
17025 NumV1OddIndices += i % 2;
17026 if (NumV2OddIndices < NumV1OddIndices)
17027 return true;
17028 }
17029 }
17030 }
17032 return false;
17033 }
17035 /// Top-level lowering for x86 vector shuffles.
17037 /// This handles decomposition, canonicalization, and lowering of all x86
17038 /// vector shuffles. Most of the specific lowering strategies are encapsulated
17039 /// above in helper routines. The canonicalization attempts to widen shuffles
17040 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
17041 /// s.t. only one of the two inputs needs to be tested, etc.
17042 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
17043 SelectionDAG &DAG) {
17044 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17045 ArrayRef<int> OrigMask = SVOp->getMask();
17046 SDValue V1 = Op.getOperand(0);
17047 SDValue V2 = Op.getOperand(1);
17048 MVT VT = Op.getSimpleValueType();
17049 int NumElements = VT.getVectorNumElements();
17050 SDLoc DL(Op);
17051 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17053 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17054 "Can't lower MMX shuffles");
17056 bool V1IsUndef = V1.isUndef();
17057 bool V2IsUndef = V2.isUndef();
17058 if (V1IsUndef && V2IsUndef)
17059 return DAG.getUNDEF(VT);
17061 // When we create a shuffle node we put the UNDEF node as the second operand,
17062 // but in some cases the first operand may be transformed to UNDEF.
17063 // In this case we should just commute the node.
17064 if (V1IsUndef)
17065 return DAG.getCommutedVectorShuffle(*SVOp);
17067 // Check for non-undef masks pointing at an undef vector and make the masks
17068 // undef as well. This makes it easier to match the shuffle based solely on
17069 // the mask.
17070 if (V2IsUndef &&
17071 any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17072 SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
17073 for (int &M : NewMask)
17074 if (M >= NumElements)
17075 M = -1;
17076 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17079 // Check for illegal shuffle mask element index values.
17080 int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17081 (void)MaskUpperLimit;
17082 assert(llvm::all_of(OrigMask,
17083 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17084 "Out of bounds shuffle index");
17086 // We actually see shuffles that are entirely re-arrangements of a set of
17087 // zero inputs. This mostly happens while decomposing complex shuffles into
17088 // simple ones. Directly lower these as a buildvector of zeros.
17089 APInt Zeroable = computeZeroableShuffleElements(OrigMask, V1, V2);
17090 if (Zeroable.isAllOnesValue())
17091 return getZeroVector(VT, Subtarget, DAG, DL);
17093 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17095 // Create an alternative mask with info about zeroable elements.
17096 // Here we do not set undef elements as zeroable.
17097 SmallVector<int, 64> ZeroableMask(OrigMask.begin(), OrigMask.end());
17098 if (V2IsZero) {
17099 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
17100 for (int i = 0; i != NumElements; ++i)
17101 if (OrigMask[i] != SM_SentinelUndef && Zeroable[i])
17102 ZeroableMask[i] = SM_SentinelZero;
17105 // Try to collapse shuffles into using a vector type with fewer elements but
17106 // wider element types. We cap this to not form integers or floating point
17107 // elements wider than 64 bits, but it might be interesting to form i128
17108 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
17109 SmallVector<int, 16> WidenedMask;
17110 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17111 canWidenShuffleElements(ZeroableMask, WidenedMask)) {
17112 // Shuffle mask widening should not interfere with a broadcast opportunity
17113 // by obfuscating the operands with bitcasts.
17114 // TODO: Avoid lowering directly from this top-level function: make this
17115 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17116 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17117 Subtarget, DAG))
17118 return Broadcast;
17120 MVT NewEltVT = VT.isFloatingPoint()
17121 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17122 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17123 int NewNumElts = NumElements / 2;
17124 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17125 // Make sure that the new vector type is legal. For example, v2f64 isn't
17126 // legal on SSE1.
17127 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17128 if (V2IsZero) {
17129 // Modify the new Mask to take all zeros from the all-zero vector.
17130 // Choose indices that are blend-friendly.
17131 bool UsedZeroVector = false;
17132 assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
17133 "V2's non-undef elements are used?!");
17134 for (int i = 0; i != NewNumElts; ++i)
17135 if (WidenedMask[i] == SM_SentinelZero) {
17136 WidenedMask[i] = i + NewNumElts;
17137 UsedZeroVector = true;
17139 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17140 // some elements to be undef.
17141 if (UsedZeroVector)
17142 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17144 V1 = DAG.getBitcast(NewVT, V1);
17145 V2 = DAG.getBitcast(NewVT, V2);
17146 return DAG.getBitcast(
17147 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17148 }
17149 }
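// Illustrative note (added): e.g. the v4i32 mask <0,1,4,5> widens to the
// v2i64 mask <0,2>, so lowering retries with half as many lanes of twice
// the width.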
17151 // Commute the shuffle if it will improve canonicalization.
17152 SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
17153 if (canonicalizeShuffleMaskWithCommute(Mask)) {
17154 ShuffleVectorSDNode::commuteMask(Mask);
17155 std::swap(V1, V2);
17156 }
17158 if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
17159 return V;
17161 // For each vector width, delegate to a specialized lowering routine.
17162 if (VT.is128BitVector())
17163 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17165 if (VT.is256BitVector())
17166 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17168 if (VT.is512BitVector())
17169 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17171 if (Is1BitVector)
17172 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17174 llvm_unreachable("Unimplemented!");
17175 }
17177 /// Try to lower a VSELECT instruction to a vector shuffle.
17178 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17179 const X86Subtarget &Subtarget,
17180 SelectionDAG &DAG) {
17181 SDValue Cond = Op.getOperand(0);
17182 SDValue LHS = Op.getOperand(1);
17183 SDValue RHS = Op.getOperand(2);
17184 MVT VT = Op.getSimpleValueType();
17186 // Only non-legal VSELECTs reach this lowering; convert those into generic
17187 // shuffles and re-use the shuffle lowering path for blends.
17188 SmallVector<int, 32> Mask;
17189 if (createShuffleMaskFromVSELECT(Mask, Cond))
17190 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17192 return SDValue();
17193 }
17195 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17196 SDValue Cond = Op.getOperand(0);
17197 SDValue LHS = Op.getOperand(1);
17198 SDValue RHS = Op.getOperand(2);
17200 // A vselect where all conditions and data are constants can be optimized into
17201 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17202 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17203 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17204 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17205 return SDValue();
17207 // Try to lower this to a blend-style vector shuffle. This can handle all
17208 // constant condition cases.
17209 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17210 return BlendOp;
17212 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17213 // with patterns on the mask registers on AVX-512.
17214 MVT CondVT = Cond.getSimpleValueType();
17215 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17216 if (CondEltSize == 1)
17217 return Op;
17219 // Variable blends are only legal from SSE4.1 onward.
17220 if (!Subtarget.hasSSE41())
17221 return SDValue();
17223 SDLoc dl(Op);
17224 MVT VT = Op.getSimpleValueType();
17225 unsigned EltSize = VT.getScalarSizeInBits();
17226 unsigned NumElts = VT.getVectorNumElements();
17228 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17229 // into an i1 condition so that we can use the mask-based 512-bit blend
17230 // instructions.
17231 if (VT.getSizeInBits() == 512) {
17232 // Build a mask by testing the condition against zero.
17233 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17234 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17235 DAG.getConstant(0, dl, CondVT),
17236 ISD::SETNE);
17237 // Now return a new VSELECT using the mask.
17238 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17241 // SEXT/TRUNC cases where the mask doesn't match the destination size.
17242 if (CondEltSize != EltSize) {
17243 // If we don't have a sign splat, rely on the expansion.
17244 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17245 return SDValue();
17247 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17248 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17249 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17250 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17253 // Only some types will be legal on some subtargets. If we can emit a legal
17254 // VSELECT-matching blend, return Op; if we need to expand, return
17255 // a null SDValue.
17256 switch (VT.SimpleTy) {
17257 default:
17258 // Most of the vector types have blends past SSE4.1.
17259 return Op;
17261 case MVT::v32i8:
17262 // The byte blends for AVX vectors were introduced only in AVX2.
17263 if (Subtarget.hasAVX2())
17264 return Op;
17266 return SDValue();
17268 case MVT::v8i16:
17269 case MVT::v16i16: {
17270 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
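// Illustrative note (added): this is safe because the condition reaching here
// is a per-element sign splat (checked above via ComputeNumSignBits), so
// duplicating each i16 condition across two i8 lanes preserves the select
// semantics, and a vXi8 VSELECT can then match PBLENDVB.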
17271 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17272 Cond = DAG.getBitcast(CastVT, Cond);
17273 LHS = DAG.getBitcast(CastVT, LHS);
17274 RHS = DAG.getBitcast(CastVT, RHS);
17275 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17276 return DAG.getBitcast(VT, Select);
17277 }
17278 }
17279 }
17281 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17282 MVT VT = Op.getSimpleValueType();
17283 SDLoc dl(Op);
17285 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
17286 return SDValue();
17288 if (VT.getSizeInBits() == 8) {
17289 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
17290 Op.getOperand(0), Op.getOperand(1));
17291 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17294 if (VT == MVT::f32) {
17295 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17296 // the result back to an FR32 register. It's only worth matching if the
17297 // result has a single use which is a store or a bitcast to i32. And in
17298 // the case of a store, it's not worth it if the index is a constant 0,
17299 // because a MOVSSmr can be used instead, which is smaller and faster.
17300 if (!Op.hasOneUse())
17301 return SDValue();
17302 SDNode *User = *Op.getNode()->use_begin();
17303 if ((User->getOpcode() != ISD::STORE ||
17304 isNullConstant(Op.getOperand(1))) &&
17305 (User->getOpcode() != ISD::BITCAST ||
17306 User->getValueType(0) != MVT::i32))
17307 return SDValue();
17308 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17309 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
17310 Op.getOperand(1));
17311 return DAG.getBitcast(MVT::f32, Extract);
17312 }
17314 if (VT == MVT::i32 || VT == MVT::i64) {
17315 // ExtractPS/pextrq works with constant index.
17316 if (isa<ConstantSDNode>(Op.getOperand(1)))
17317 return Op;
17318 }
17320 return SDValue();
17321 }
17323 /// Extract one bit from a mask vector, like v16i1 or v8i1.
17324 /// An AVX-512 feature.
17325 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17326 const X86Subtarget &Subtarget) {
17327 SDValue Vec = Op.getOperand(0);
17328 SDLoc dl(Vec);
17329 MVT VecVT = Vec.getSimpleValueType();
17330 SDValue Idx = Op.getOperand(1);
17331 MVT EltVT = Op.getSimpleValueType();
17333 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17334 "Unexpected vector type in ExtractBitFromMaskVector");
17336 // A variable index can't be handled in mask registers,
17337 // so extend the vector to a VR512/VR128-class register instead.
17338 if (!isa<ConstantSDNode>(Idx)) {
17339 unsigned NumElts = VecVT.getVectorNumElements();
17340 // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
17341 // than extending to 128/256 bits.
17342 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17343 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17344 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17345 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17346 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17349 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17350 if (IdxVal == 0) // the operation is legal
17351 return Op;
17353 // Extend to natively supported kshift.
17354 unsigned NumElems = VecVT.getVectorNumElements();
17355 MVT WideVecVT = VecVT;
17356 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17357 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17358 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17359 DAG.getUNDEF(WideVecVT), Vec,
17360 DAG.getIntPtrConstant(0, dl));
17363 // Use kshiftr instruction to move to the lower element.
17364 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17365 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17367 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17368 DAG.getIntPtrConstant(0, dl));
17369 }
17371 SDValue
17372 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17373 SelectionDAG &DAG) const {
17374 SDLoc dl(Op);
17375 SDValue Vec = Op.getOperand(0);
17376 MVT VecVT = Vec.getSimpleValueType();
17377 SDValue Idx = Op.getOperand(1);
17379 if (VecVT.getVectorElementType() == MVT::i1)
17380 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17382 if (!isa<ConstantSDNode>(Idx)) {
17383 // It's more profitable to go through memory (1 cycle throughput)
17384 // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17385 // The IACA tool was used to get the performance estimates
17386 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17388 // example : extractelement <16 x i8> %a, i32 %i
17390 // Block Throughput: 3.00 Cycles
17391 // Throughput Bottleneck: Port5
17393 // | Num Of | Ports pressure in cycles | |
17394 // | Uops | 0 - DV | 5 | 6 | 7 | |
17395 // ---------------------------------------------
17396 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
17397 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
17398 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
17399 // Total Num Of Uops: 4
17402 // Block Throughput: 1.00 Cycles
17403 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17405 // | | Ports pressure in cycles | |
17406 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
17407 // ---------------------------------------------------------
17408 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17409 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
17410 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
17411 // Total Num Of Uops: 4
17413 return SDValue();
17414 }
17416 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17418 // If this is a 256-bit vector result, first extract the 128-bit vector and
17419 // then extract the element from the 128-bit vector.
17420 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17421 // Get the 128-bit vector.
17422 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17423 MVT EltVT = VecVT.getVectorElementType();
17425 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17426 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17428 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17429 // this can be done with a mask.
17430 IdxVal &= ElemsPerChunk - 1;
17431 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17432 DAG.getIntPtrConstant(IdxVal, dl));
17435 assert(VecVT.is128BitVector() && "Unexpected vector length");
17437 MVT VT = Op.getSimpleValueType();
17439 if (VT.getSizeInBits() == 16) {
17440 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17441 // we're going to zero extend the register or fold the store (SSE41 only).
17442 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
17443 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17444 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17445 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17446 DAG.getBitcast(MVT::v4i32, Vec), Idx));
17448 // Transform it so it matches pextrw, which produces a 32-bit result.
17449 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
17450 Op.getOperand(0), Op.getOperand(1));
17451 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17454 if (Subtarget.hasSSE41())
17455 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17456 return Res;
17458 // TODO: We only extract a single element from v16i8; we can probably afford
17459 // to be more aggressive here before using the default approach of spilling to
17460 // the stack.
17461 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
17462 // Extract either the lowest i32 or any i16, and extract the sub-byte.
17463 int DWordIdx = IdxVal / 4;
17464 if (DWordIdx == 0) {
17465 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17466 DAG.getBitcast(MVT::v4i32, Vec),
17467 DAG.getIntPtrConstant(DWordIdx, dl));
17468 int ShiftVal = (IdxVal % 4) * 8;
17469 if (ShiftVal != 0)
17470 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17471 DAG.getConstant(ShiftVal, dl, MVT::i8));
17472 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17475 int WordIdx = IdxVal / 2;
17476 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17477 DAG.getBitcast(MVT::v8i16, Vec),
17478 DAG.getIntPtrConstant(WordIdx, dl));
17479 int ShiftVal = (IdxVal % 2) * 8;
17480 if (ShiftVal != 0)
17481 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17482 DAG.getConstant(ShiftVal, dl, MVT::i8));
17483 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17484 }
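// Illustrative note (added): e.g. extracting byte 5 of a v16i8 uses WordIdx 2
// and ShiftVal 8, emitting (trunc (srl (pextrw V, 2), 8)) instead of spilling.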
17486 if (VT.getSizeInBits() == 32) {
17487 if (IdxVal == 0)
17488 return Op;
17490 // SHUFPS the element to the lowest double word, then movss.
17491 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17492 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17493 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17494 DAG.getIntPtrConstant(0, dl));
17497 if (VT.getSizeInBits() == 64) {
17498 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17499 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17500 // to match extract_elt for f64.
17501 if (IdxVal == 0)
17502 return Op;
17504 // UNPCKHPD the element to the lowest double word, then movsd.
17505 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
17506 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
17507 int Mask[2] = { 1, -1 };
17508 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17509 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17510 DAG.getIntPtrConstant(0, dl));
17511 }
17513 return SDValue();
17514 }
17516 /// Insert one bit into a mask vector, like v16i1 or v8i1.
17517 /// An AVX-512 feature.
17518 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17519 const X86Subtarget &Subtarget) {
17520 SDLoc dl(Op);
17521 SDValue Vec = Op.getOperand(0);
17522 SDValue Elt = Op.getOperand(1);
17523 SDValue Idx = Op.getOperand(2);
17524 MVT VecVT = Vec.getSimpleValueType();
17526 if (!isa<ConstantSDNode>(Idx)) {
17527 // Non constant index. Extend source and destination,
17528 // insert element and then truncate the result.
17529 unsigned NumElts = VecVT.getVectorNumElements();
17530 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17531 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17532 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17533 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17534 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17535 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17538 // Copy into a k-register, extract to v1i1 and insert_subvector.
17539 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17541 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
17542 Idx);
17543 }
17545 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17546 SelectionDAG &DAG) const {
17547 MVT VT = Op.getSimpleValueType();
17548 MVT EltVT = VT.getVectorElementType();
17549 unsigned NumElts = VT.getVectorNumElements();
17551 if (EltVT == MVT::i1)
17552 return InsertBitToMaskVector(Op, DAG, Subtarget);
17554 SDLoc dl(Op);
17555 SDValue N0 = Op.getOperand(0);
17556 SDValue N1 = Op.getOperand(1);
17557 SDValue N2 = Op.getOperand(2);
17559 auto *N2C = dyn_cast<ConstantSDNode>(N2);
17560 if (!N2C || N2C->getAPIntValue().uge(NumElts))
17561 return SDValue();
17562 uint64_t IdxVal = N2C->getZExtValue();
17564 bool IsZeroElt = X86::isZeroNode(N1);
17565 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17567 // If we are inserting an element, see if we can do this more efficiently with
17568 // a blend shuffle with a rematerializable vector than with a costly integer
17569 // insertion.
17570 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17571 16 <= EltVT.getSizeInBits()) {
17572 SmallVector<int, 8> BlendMask;
17573 for (unsigned i = 0; i != NumElts; ++i)
17574 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17575 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17576 : getOnesVector(VT, DAG, dl);
17577 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
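// Illustrative note (added): e.g. inserting zero into lane 2 of a v4i32
// builds the blend mask <0,1,6,3> against an all-zeros vector, which lowers
// to a single blend instead of a GPR-to-vector round-trip.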
17580 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17581 // into that, and then insert the subvector back into the result.
17582 if (VT.is256BitVector() || VT.is512BitVector()) {
17583 // With a 256-bit vector, we can insert into the zero element efficiently
17584 // using a blend if we have AVX or AVX2 and the right data type.
17585 if (VT.is256BitVector() && IdxVal == 0) {
17586 // TODO: It is worthwhile to cast integer to floating point and back
17587 // and incur a domain crossing penalty if that's what we'll end up
17588 // doing anyway after extracting to a 128-bit vector.
17589 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17590 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17591 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17592 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
17593 DAG.getTargetConstant(1, dl, MVT::i8));
17594 }
17595 }
17597 // Get the desired 128-bit vector chunk.
17598 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17600 // Insert the element into the desired chunk.
17601 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17602 assert(isPowerOf2_32(NumEltsIn128));
17603 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
17604 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17606 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17607 DAG.getIntPtrConstant(IdxIn128, dl));
17609 // Insert the changed part back into the bigger vector
17610 return insert128BitVector(N0, V, IdxVal, DAG, dl);
17612 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17614 // This will be just movd/movq/movss/movsd.
17615 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17616 (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17617 EltVT == MVT::i64)) {
17618 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17619 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17622 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17623 // argument. SSE41 required for pinsrb.
17624 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17625 unsigned Opc;
17626 if (VT == MVT::v8i16) {
17627 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17628 Opc = X86ISD::PINSRW;
17629 } else {
17630 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17631 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17632 Opc = X86ISD::PINSRB;
17635 if (N1.getValueType() != MVT::i32)
17636 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17637 if (N2.getValueType() != MVT::i32)
17638 N2 = DAG.getIntPtrConstant(IdxVal, dl);
17639 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17642 if (Subtarget.hasSSE41()) {
17643 if (EltVT == MVT::f32) {
17644 // Bits [7:6] of the constant are the source select. This will always be
17645 // zero here. The DAG Combiner may combine an extract_elt index into
17646 // these bits. For example (insert (extract, 3), 2) could be matched by
17647 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17648 // Bits [5:4] of the constant are the destination select. This is the
17649 // value of the incoming immediate.
17650 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17651 // combine either bitwise AND or insert of float 0.0 to set these bits.
17653 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17654 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17655 // If this is an insertion of 32-bits into the low 32-bits of
17656 // a vector, we prefer to generate a blend with immediate rather
17657 // than an insertps. Blends are simpler operations in hardware and so
17658 // will always have equal or better performance than insertps.
17659 // But if optimizing for size and there's a load folding opportunity,
17660 // generate insertps because blendps does not have a 32-bit memory
17662 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17663 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
17664 DAG.getTargetConstant(1, dl, MVT::i8));
17666 // Create this as a scalar to vector.
17667 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17668 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
17669 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
17670 }
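// Illustrative note (added): e.g. IdxVal == 2 yields immediate 0x20 (bits
// [5:4] select destination lane 2; the source-lane and zero-mask fields are
// 0), i.e. insertps $0x20.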
17672 // PINSR* works with constant index.
17673 if (EltVT == MVT::i32 || EltVT == MVT::i64)
17674 return Op;
17676 return SDValue();
17677 }
17680 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17681 SelectionDAG &DAG) {
17682 SDLoc dl(Op);
17683 MVT OpVT = Op.getSimpleValueType();
17685 // It's always cheaper to replace a xor+movd with xorps, and it simplifies
17686 // further combining.
17687 if (X86::isZeroNode(Op.getOperand(0)))
17688 return getZeroVector(OpVT, Subtarget, DAG, dl);
17690 // If this is a 256-bit vector result, first insert into a 128-bit
17691 // vector and then insert into the 256-bit vector.
17692 if (!OpVT.is128BitVector()) {
17693 // Insert into a 128-bit vector.
17694 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17695 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17696 OpVT.getVectorNumElements() / SizeFactor);
17698 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17700 // Insert the 128-bit vector.
17701 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17703 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17704 "Expected an SSE type!");
17706 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17707 if (OpVT == MVT::v4i32)
17708 return Op;
17710 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17711 return DAG.getBitcast(
17712 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17715 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
17716 // simple superregister reference or explicit instructions to insert
17717 // the upper bits of a vector.
17718 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17719 SelectionDAG &DAG) {
17720 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17722 return insert1BitVector(Op, DAG, Subtarget);
17725 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17726 SelectionDAG &DAG) {
17727 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17728 "Only vXi1 extract_subvectors need custom lowering");
17731 SDValue Vec = Op.getOperand(0);
17732 SDValue Idx = Op.getOperand(1);
17734 if (!isa<ConstantSDNode>(Idx))
17735 return SDValue();
17737 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17738 if (IdxVal == 0) // the operation is legal
17739 return Op;
17741 MVT VecVT = Vec.getSimpleValueType();
17742 unsigned NumElems = VecVT.getVectorNumElements();
17744 // Extend to natively supported kshift.
17745 MVT WideVecVT = VecVT;
17746 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17747 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17748 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17749 DAG.getUNDEF(WideVecVT), Vec,
17750 DAG.getIntPtrConstant(0, dl));
17753 // Shift to the LSB.
17754 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17755 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17757 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
17758 DAG.getIntPtrConstant(0, dl));
17761 // Returns the appropriate wrapper opcode for a global reference.
17762 unsigned X86TargetLowering::getGlobalWrapperKind(
17763 const GlobalValue *GV, const unsigned char OpFlags) const {
17764 // References to absolute symbols are never PC-relative.
17765 if (GV && GV->isAbsoluteSymbolRef())
17766 return X86ISD::Wrapper;
17768 CodeModel::Model M = getTargetMachine().getCodeModel();
17769 if (Subtarget.isPICStyleRIPRel() &&
17770 (M == CodeModel::Small || M == CodeModel::Kernel))
17771 return X86ISD::WrapperRIP;
17773 // GOTPCREL references must always use RIP.
17774 if (OpFlags == X86II::MO_GOTPCREL)
17775 return X86ISD::WrapperRIP;
17777 return X86ISD::Wrapper;
17780 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
17781 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
17782 // one of the above mentioned nodes. It has to be wrapped because otherwise
17783 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
17784 // be used to form addressing mode. These wrapped nodes will be selected
17785 // during isel.
17786 SDValue
17787 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
17788 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
17790 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17791 // global base reg.
17792 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17794 auto PtrVT = getPointerTy(DAG.getDataLayout());
17795 SDValue Result = DAG.getTargetConstantPool(
17796 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
17798 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17799 // With PIC, the address is actually $g + Offset.
17802 DAG.getNode(ISD::ADD, DL, PtrVT,
17803 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17809 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
17810 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
17812 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17813 // global base reg.
17814 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17816 auto PtrVT = getPointerTy(DAG.getDataLayout());
17817 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
17819 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17821 // With PIC, the address is actually $g + Offset.
17824 DAG.getNode(ISD::ADD, DL, PtrVT,
17825 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17830 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
17831 SelectionDAG &DAG) const {
17832 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17836 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
17837 // Create the TargetBlockAddressAddress node.
17838 unsigned char OpFlags =
17839 Subtarget.classifyBlockAddressReference();
17840 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
17841 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
17843 auto PtrVT = getPointerTy(DAG.getDataLayout());
17844 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
17845 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
17847 // With PIC, the address is actually $g + Offset.
17848 if (isGlobalRelativeToPICBase(OpFlags)) {
17849 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17850 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17856 /// Creates target global address or external symbol nodes for calls or
17857 /// other uses.
17858 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
17859 bool ForCall) const {
17860 // Unpack the global address or external symbol.
17861 const SDLoc &dl = SDLoc(Op);
17862 const GlobalValue *GV = nullptr;
17863 int64_t Offset = 0;
17864 const char *ExternalSym = nullptr;
17865 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
17866 GV = G->getGlobal();
17867 Offset = G->getOffset();
17868 } else {
17869 const auto *ES = cast<ExternalSymbolSDNode>(Op);
17870 ExternalSym = ES->getSymbol();
17871 }
17873 // Calculate some flags for address lowering.
17874 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
17875 unsigned char OpFlags;
17876 if (ForCall)
17877 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
17878 else
17879 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
17880 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
17881 bool NeedsLoad = isGlobalStubReference(OpFlags);
17883 CodeModel::Model M = DAG.getTarget().getCodeModel();
17884 auto PtrVT = getPointerTy(DAG.getDataLayout());
17886 SDValue Result;
17887 if (GV) {
17888 // Create a target global address if this is a global. If possible, fold the
17889 // offset into the global address reference. Otherwise, ADD it on later.
17890 int64_t GlobalOffset = 0;
17891 if (OpFlags == X86II::MO_NO_FLAG &&
17892 X86::isOffsetSuitableForCodeModel(Offset, M)) {
17893 std::swap(GlobalOffset, Offset);
17894 }
17895 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
17896 } else {
17897 // If this is not a global address, this must be an external symbol.
17898 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
17899 }
17901 // If this is a direct call, avoid the wrapper if we don't need to do any
17902 // loads or adds. This allows SDAG ISel to match direct calls.
17903 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
17904 return Result;
17906 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
17908 // With PIC, the address is actually $g + Offset.
17909 if (HasPICReg)
17910 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17911 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17914 // For globals that require a load from a stub to get the address, emit the
17915 // load.
17916 if (NeedsLoad)
17917 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
17918 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
17920 // If there was a non-zero offset that we didn't fold, create an explicit
17921 // addition for it.
17922 if (Offset != 0)
17923 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
17924 DAG.getConstant(Offset, dl, PtrVT));
17926 return Result;
17927 }
17929 SDValue
17930 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
17931 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17932 }
17934 static SDValue
17935 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
17936 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
17937 unsigned char OperandFlags, bool LocalDynamic = false) {
17938 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17939 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17941 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17942 GA->getValueType(0),
17943 GA->getOffset(),
17944 OperandFlags);
17946 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
17947 : X86ISD::TLSADDR;
17949 if (InFlag) {
17950 SDValue Ops[] = { Chain, TGA, *InFlag };
17951 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17952 } else {
17953 SDValue Ops[] = { Chain, TGA };
17954 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17957 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
17958 MFI.setAdjustsStack(true);
17959 MFI.setHasCalls(true);
17961 SDValue Flag = Chain.getValue(1);
17962 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
17963 }
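// Illustrative note (added): for the 32-bit general-dynamic case this
// materializes the classic "leal x@TLSGD(,%ebx,1), %eax; call
// ___tls_get_addr@PLT" pair, which the linker may later relax to
// initial-exec or local-exec forms.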
17965 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
17966 static SDValue
17967 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17968 const EVT PtrVT) {
17969 SDValue InFlag;
17970 SDLoc dl(GA); // ? function entry point might be better
17971 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17972 DAG.getNode(X86ISD::GlobalBaseReg,
17973 SDLoc(), PtrVT), InFlag);
17974 InFlag = Chain.getValue(1);
17976 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
17979 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
17980 static SDValue
17981 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17982 const EVT PtrVT) {
17983 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
17984 X86::RAX, X86II::MO_TLSGD);
17985 }
17987 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
17988 SelectionDAG &DAG,
17989 const EVT PtrVT,
17990 bool is64Bit) {
17991 SDLoc dl(GA);
17993 // Get the start address of the TLS block for this module.
17994 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
17995 .getInfo<X86MachineFunctionInfo>();
17996 MFI->incNumLocalDynamicTLSAccesses();
17998 SDValue Base;
17999 if (is64Bit) {
18000 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
18001 X86II::MO_TLSLD, /*LocalDynamic=*/true);
18002 } else {
18003 SDValue InFlag;
18004 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18005 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
18006 InFlag = Chain.getValue(1);
18007 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
18008 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18009 }
18011 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18012 // of Base.
18014 // Build x@dtpoff.
18015 unsigned char OperandFlags = X86II::MO_DTPOFF;
18016 unsigned WrapperKind = X86ISD::Wrapper;
18017 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18018 GA->getValueType(0),
18019 GA->getOffset(), OperandFlags);
18020 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18022 // Add x@dtpoff with the base.
18023 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18026 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18027 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18028 const EVT PtrVT, TLSModel::Model model,
18029 bool is64Bit, bool isPIC) {
18032 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18033 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
18034 is64Bit ? 257 : 256));
18036 SDValue ThreadPointer =
18037 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18038 MachinePointerInfo(Ptr));
18040 unsigned char OperandFlags = 0;
18041 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
18042 // the initial-exec model on x86-64, handled below.
18043 unsigned WrapperKind = X86ISD::Wrapper;
18044 if (model == TLSModel::LocalExec) {
18045 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18046 } else if (model == TLSModel::InitialExec) {
18047 if (is64Bit) {
18048 OperandFlags = X86II::MO_GOTTPOFF;
18049 WrapperKind = X86ISD::WrapperRIP;
18050 } else {
18051 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18052 }
18053 } else {
18054 llvm_unreachable("Unexpected model");
18055 }
18057 // emit "addl x@ntpoff,%eax" (local exec)
18058 // or "addl x@indntpoff,%eax" (initial exec)
18059 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
18061 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18062 GA->getOffset(), OperandFlags);
18063 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18065 if (model == TLSModel::InitialExec) {
18066 if (isPIC && !is64Bit) {
18067 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18068 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18069 Offset);
18070 }
18072 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18073 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18074 }
18076 // The address of the thread local variable is the add of the thread
18077 // pointer with the offset of the variable.
18078 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18079 }
18081 SDValue
18082 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18084 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18086 if (DAG.getTarget().useEmulatedTLS())
18087 return LowerToTLSEmulatedModel(GA, DAG);
18089 const GlobalValue *GV = GA->getGlobal();
18090 auto PtrVT = getPointerTy(DAG.getDataLayout());
18091 bool PositionIndependent = isPositionIndependent();
18093 if (Subtarget.isTargetELF()) {
18094 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18095 switch (model) {
18096 case TLSModel::GeneralDynamic:
18097 if (Subtarget.is64Bit())
18098 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18099 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18100 case TLSModel::LocalDynamic:
18101 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
18102 Subtarget.is64Bit());
18103 case TLSModel::InitialExec:
18104 case TLSModel::LocalExec:
18105 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18106 PositionIndependent);
18107 }
18108 llvm_unreachable("Unknown TLS model.");
18109 }
18111 if (Subtarget.isTargetDarwin()) {
18112 // Darwin only has one model of TLS. Lower to that.
18113 unsigned char OpFlag = 0;
18114 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18115 X86ISD::WrapperRIP : X86ISD::Wrapper;
18117 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18118 // global base reg.
18119 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18120 if (PIC32)
18121 OpFlag = X86II::MO_TLVP_PIC_BASE;
18122 else
18123 OpFlag = X86II::MO_TLVP;
18124 SDLoc DL(Op);
18125 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18126 GA->getValueType(0),
18127 GA->getOffset(), OpFlag);
18128 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18130 // With PIC32, the address is actually $g + Offset.
18131 if (PIC32)
18132 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18133 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18134 Offset);
18136 // Lowering the machine isd will make sure everything is in the right
18137 // location.
18138 SDValue Chain = DAG.getEntryNode();
18139 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18140 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18141 SDValue Args[] = { Chain, Offset };
18142 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18143 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
18144 DAG.getIntPtrConstant(0, DL, true),
18145 Chain.getValue(1), DL);
18147 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
18148 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18149 MFI.setAdjustsStack(true);
18151 // And our return value (tls address) is in the standard call return value
18152 // location.
18153 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18154 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18155 }
18157 if (Subtarget.isOSWindows()) {
18158 // Just use the implicit TLS architecture
18159 // Need to generate something similar to:
18160 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18162 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
18163 // mov rcx, qword [rdx+rcx*8]
18164 // mov eax, .tls$:tlsvar
18165 // [rax+rcx] contains the address
18166 // Windows 64bit: gs:0x58
18167 // Windows 32bit: fs:__tls_array
18170 SDValue Chain = DAG.getEntryNode();
18172 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18173 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18174 // use its literal value of 0x2C.
18175 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18176 ? Type::getInt8PtrTy(*DAG.getContext(),
18177 256)
18178 : Type::getInt32PtrTy(*DAG.getContext(),
18179 257));
18181 SDValue TlsArray = Subtarget.is64Bit()
18182 ? DAG.getIntPtrConstant(0x58, dl)
18183 : (Subtarget.isTargetWindowsGNU()
18184 ? DAG.getIntPtrConstant(0x2C, dl)
18185 : DAG.getExternalSymbol("_tls_array", PtrVT));
18187 SDValue ThreadPointer =
18188 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18190 SDValue res;
18191 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18192 res = ThreadPointer;
18193 } else {
18194 // Load the _tls_index variable
18195 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18196 if (Subtarget.is64Bit())
18197 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18198 MachinePointerInfo(), MVT::i32);
18200 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18202 auto &DL = DAG.getDataLayout();
18203 SDValue Scale =
18204 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18205 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18207 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18208 }
18210 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18212 // Get the offset of start of .tls section
18213 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18214 GA->getValueType(0),
18215 GA->getOffset(), X86II::MO_SECREL);
18216 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18218 // The address of the thread local variable is the add of the thread
18219 // pointer with the offset of the variable.
18220 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18221 }
18223 llvm_unreachable("TLS not implemented for this target.");
18224 }
18226 /// Lower SRA_PARTS and friends, which return two i32 values
18227 /// and take a 2 x i32 value to shift plus a shift amount.
18228 /// TODO: Can this be moved to general expansion code?
18229 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18230 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
18231 MVT VT = Op.getSimpleValueType();
18232 unsigned VTBits = VT.getSizeInBits();
18233 SDLoc dl(Op);
18234 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
18235 SDValue ShOpLo = Op.getOperand(0);
18236 SDValue ShOpHi = Op.getOperand(1);
18237 SDValue ShAmt = Op.getOperand(2);
18238 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
18239 // ISD::SRA/L nodes haven't. Insert an AND to be safe, it's optimized away
18240 // during isel.
18241 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18242 DAG.getConstant(VTBits - 1, dl, MVT::i8));
18243 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
18244 DAG.getConstant(VTBits - 1, dl, MVT::i8))
18245 : DAG.getConstant(0, dl, VT);
18247 SDValue Tmp2, Tmp3;
18248 if (Op.getOpcode() == ISD::SHL_PARTS) {
18249 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
18250 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
18252 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
18253 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
18256 // If the shift amount is greater than or equal to the width of a part we can't
18257 // rely on the results of shld/shrd. Insert a test and select the appropriate
18258 // values for large shift amounts.
18259 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18260 DAG.getConstant(VTBits, dl, MVT::i8));
18261 SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
18262 DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
18264 SDValue Lo, Hi;
18265 if (Op.getOpcode() == ISD::SHL_PARTS) {
18266 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18267 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18269 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18270 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18271 }
18273 return DAG.getMergeValues({ Lo, Hi }, dl);
18274 }
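// Illustrative note (added): e.g. SHL_PARTS of an i64 by 40 on x86-32: the
// AND with VTBits (32) is non-zero, so Lo selects the constant 0 and Hi
// selects ShOpLo << (40 & 31) == ShOpLo << 8, matching a true 64-bit shift.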
18276 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
18277 SelectionDAG &DAG) {
18278 MVT VT = Op.getSimpleValueType();
18279 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
18280 "Unexpected funnel shift opcode!");
18283 SDValue Op0 = Op.getOperand(0);
18284 SDValue Op1 = Op.getOperand(1);
18285 SDValue Amt = Op.getOperand(2);
18287 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
18289 if (VT.isVector()) {
18290 assert(Subtarget.hasVBMI2() && "Expected VBMI2");
18293 std::swap(Op0, Op1);
18295 APInt APIntShiftAmt;
18296 if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
18297 uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
18298 return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
18299 Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
18300 }
18302 return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
18303 Op0, Op1, Amt);
18304 }
18306 assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
18307 "Unexpected funnel shift type!");
18309 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
18310 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
18311 if (!OptForSize && Subtarget.isSHLDSlow())
18312 return SDValue();
18314 if (IsFSHR)
18315 std::swap(Op0, Op1);
18317 // i16 needs to modulo the shift amount explicitly, but i32/i64 have an implicit modulo.
18318 if (VT == MVT::i16)
18319 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
18320 DAG.getConstant(15, DL, Amt.getValueType()));
18322 unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
18323 return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
18324 }
18326 // Try to use a packed vector operation to handle i64 on 32-bit targets when
18327 // AVX512DQ is enabled.
18328 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18329 const X86Subtarget &Subtarget) {
18330 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18331 Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
18332 SDValue Src = Op.getOperand(0);
18333 MVT SrcVT = Src.getSimpleValueType();
18334 MVT VT = Op.getSimpleValueType();
18336 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18337 (VT != MVT::f32 && VT != MVT::f64))
18338 return SDValue();
18340 // Pack the i64 into a vector, do the operation and extract.
18342 // Use a 256-bit source when VLX is available so the result is 128 bits in the f32 case.
18343 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18344 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18345 MVT VecVT = MVT::getVectorVT(VT, NumElts);
18347 SDLoc dl(Op);
18348 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18349 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18350 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18351 DAG.getIntPtrConstant(0, dl));
18352 }
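// Illustrative note (added): e.g. i64 -> f32 on a 32-bit target with DQI but
// no VLX packs the scalar into a v8i64, converts (a VCVTQQ2PS-style node) to
// v8f32, and extracts lane 0.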
18354 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18355 const X86Subtarget &Subtarget) {
18356 switch (Opcode) {
18357 case ISD::SINT_TO_FP:
18358 // TODO: Handle wider types with AVX/AVX512.
18359 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18360 return false;
18361 // CVTDQ2PS or (V)CVTDQ2PD
18362 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18364 case ISD::UINT_TO_FP:
18365 // TODO: Handle wider types and i64 elements.
18366 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18367 return false;
18368 // VCVTUDQ2PS or VCVTUDQ2PD
18369 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18371 default:
18372 return false;
18373 }
18374 }
18376 /// Given a scalar cast operation that is extracted from a vector, try to
18377 /// vectorize the cast op followed by extraction. This will avoid an expensive
18378 /// round-trip between XMM and GPR.
18379 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18380 const X86Subtarget &Subtarget) {
18381 // TODO: This could be enhanced to handle smaller integer types by peeking
18382 // through an extend.
18383 SDValue Extract = Cast.getOperand(0);
18384 MVT DestVT = Cast.getSimpleValueType();
18385 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18386 !isa<ConstantSDNode>(Extract.getOperand(1)))
18387 return SDValue();
18389 // See if we have a 128-bit vector cast op for this type of cast.
18390 SDValue VecOp = Extract.getOperand(0);
18391 MVT FromVT = VecOp.getSimpleValueType();
18392 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18393 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18394 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18395 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18396 return SDValue();
18398 // If we are extracting from a non-zero element, first shuffle the source
18399 // vector to allow extracting from element zero.
18400 SDLoc DL(Cast);
18401 if (!isNullConstant(Extract.getOperand(1))) {
18402 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18403 Mask[0] = Extract.getConstantOperandVal(1);
18404 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18405 }
18406 // If the source vector is wider than 128-bits, extract the low part. Do not
18407 // create an unnecessarily wide vector cast op.
18408 if (FromVT != Vec128VT)
18409 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18411 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18412 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18413 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18414 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18415 DAG.getIntPtrConstant(0, DL));
18418 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
18419 SelectionDAG &DAG) const {
18420 SDValue Src = Op.getOperand(0);
18421 MVT SrcVT = Src.getSimpleValueType();
18422 MVT VT = Op.getSimpleValueType();
18424 SDLoc dl(Op);
18425 if (VT == MVT::f128)
18426 return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));
18428 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18429 return Extract;
18431 if (SrcVT.isVector()) {
18432 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
18433 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
18434 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18435 DAG.getUNDEF(SrcVT)));
18440 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
18441 "Unknown SINT_TO_FP to lower!");
18443 // These are really Legal; return the operand so the caller accepts it as
18444 // Legal.
18445 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
18447 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
18450 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18453 SDValue ValueToStore = Op.getOperand(0);
18454 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
18455 !Subtarget.is64Bit())
18456 // Bitcasting to f64 here allows us to do a single 64-bit store from
18457 // an SSE register, avoiding the store forwarding penalty that would come
18458 // with two 32-bit stores.
18459 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18461 unsigned Size = SrcVT.getSizeInBits()/8;
18462 MachineFunction &MF = DAG.getMachineFunction();
18463 auto PtrVT = getPointerTy(MF.getDataLayout());
18464 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
18465 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18466 SDValue Chain = DAG.getStore(
18467 DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18468 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18469 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18472 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18474 SelectionDAG &DAG) const {
18478 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18480 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18482 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18484 unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18486 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18487 MachineMemOperand *LoadMMO;
18489 int SSFI = FI->getIndex();
18490 LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18491 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18492 MachineMemOperand::MOLoad, ByteSize, ByteSize);
18494 LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18495 StackSlot = StackSlot.getOperand(1);
18497 SDValue FILDOps[] = {Chain, StackSlot};
18499 DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18500 Tys, FILDOps, SrcVT, LoadMMO);
18503 Chain = Result.getValue(1);
18504 SDValue InFlag = Result.getValue(2);
18506 // FIXME: Currently the FST is glued to the FILD_FLAG. This
18507 // shouldn't be necessary except that RFP cannot be live across
18508 // multiple blocks. When stackifier is fixed, they can be uncoupled.
18509 MachineFunction &MF = DAG.getMachineFunction();
18510 unsigned SSFISize = Op.getValueSizeInBits() / 8;
18511 int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18512 auto PtrVT = getPointerTy(MF.getDataLayout());
18513 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18514 Tys = DAG.getVTList(MVT::Other);
18515 SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18516 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18517 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18518 MachineMemOperand::MOStore, SSFISize, SSFISize);
18520 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18521 Op.getValueType(), StoreMMO);
18522 Result = DAG.getLoad(
18523 Op.getValueType(), DL, Chain, StackSlot,
18524 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18530 /// Horizontal vector math instructions may be slower than normal math with
18531 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
18532 /// implementation, and likely shuffle complexity of the alternate sequence.
18533 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
18534 const X86Subtarget &Subtarget) {
18535 bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
18536 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
18537 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
18540 /// 64-bit unsigned integer to double expansion.
18541 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18542 const X86Subtarget &Subtarget) {
18543 // This algorithm is not obvious. Here is what we're trying to output:
18546 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18547 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18549 haddpd %xmm0, %xmm0
18551 pshufd $0x4e, %xmm0, %xmm1
18557 LLVMContext *Context = DAG.getContext();
18559 // Build some magic constants.
18560 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18561 Constant *C0 = ConstantDataVector::get(*Context, CV0);
18562 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18563 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18565 SmallVector<Constant*,2> CV1;
18567 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18568 APInt(64, 0x4330000000000000ULL))));
18570 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18571 APInt(64, 0x4530000000000000ULL))));
18572 Constant *C1 = ConstantVector::get(CV1);
18573 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
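// Illustrative sketch (not part of the lowering) of why these constants work,
// for an input u = Hi * 2^32 + Lo split into 32-bit halves:
//   bits(0x43300000 << 32 | Lo) as a double == 0x1.0p52 + Lo
//   bits(0x45300000 << 32 | Hi) as a double == 0x1.0p84 + Hi * 0x1.0p32
// Subtracting c1 = { 0x1.0p52, 0x1.0p84 } is exact and leaves
// { (double)Lo, (double)Hi * 2^32 }; the final add below performs the single
// rounding step that yields (double)u.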
18575 // Load the 64-bit value into an XMM register.
18576 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
18579 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18580 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18581 /* Alignment = */ 16);
18583 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18586 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18587 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18588 /* Alignment = */ 16);
18589 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18590 // TODO: Are there any fast-math-flags to propagate here?
18591 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18594 if (Subtarget.hasSSE3() && shouldUseHorizontalOp(true, DAG, Subtarget)) {
18595 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18597 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18598 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
18601 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
18602 DAG.getIntPtrConstant(0, dl));
18605 /// 32-bit unsigned integer to float expansion.
18606 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
18607 const X86Subtarget &Subtarget) {
18609 // FP constant to bias correct the final result.
18610 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
18613 // Load the 32-bit value into an XMM register.
18614 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
18617 // Zero out the upper parts of the register.
18618 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
18620 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18621 DAG.getBitcast(MVT::v2f64, Load),
18622 DAG.getIntPtrConstant(0, dl));
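// Sketch of the trick used below (illustrative only): OR'ing the
// zero-extended 32-bit value x into the mantissa of the bias 0x1.0p52
// (bit pattern 0x4330000000000000) yields the double 0x1.0p52 + x exactly,
// so subtracting the bias recovers (double)x without an integer conversion.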
18624 // Or the load with the bias.
18625 SDValue Or = DAG.getNode(
18626 ISD::OR, dl, MVT::v2i64,
18627 DAG.getBitcast(MVT::v2i64,
18628 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
18629 DAG.getBitcast(MVT::v2i64,
18630 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
18632 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18633 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
18635 // Subtract the bias.
18636 // TODO: Are there any fast-math-flags to propagate here?
18637 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
18639 // Handle final rounding.
18640 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
18643 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
18644 const X86Subtarget &Subtarget,
18646 if (Op.getSimpleValueType() != MVT::v2f64)
18649 SDValue N0 = Op.getOperand(0);
18650 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
18652 // Legalize to v4i32 type.
18653 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
18654 DAG.getUNDEF(MVT::v2i32));
18656 if (Subtarget.hasAVX512())
18657 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
18659 // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
18660 // but using v2i32 to v2f64 with X86ISD::CVTSI2P.
18661 SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
18662 SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
18664 // Two to the power of half-word-size.
18665 SDValue TWOHW = DAG.getConstantFP((double)(1 << 16), DL, MVT::v2f64);
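// Illustrative view of this expansion: each unsigned 32-bit element u is
// split as u = hi * 2^16 + lo with hi, lo < 2^16, so both halves are
// non-negative as signed i32 and safe for CVTSI2P; the result is recombined
// as (double)hi * 65536.0 + (double)lo, which is exact in f64.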
18667 // Clear upper part of LO, lower HI.
18668 SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
18669 SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
18671 SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
18672 fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
18673 SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
18675 // Add the two halves.
18676 return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
18679 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
18680 const X86Subtarget &Subtarget) {
18681 // The algorithm is the following:
18682 // #ifdef __SSE4_1__
18683 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18684 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18685 // (uint4) 0x53000000, 0xaa);
18687 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18688 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18690 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18691 // return (float4) lo + fhi;
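//
// Worked example (illustrative, not part of the lowering): for v = 0x89abcdef,
//   lo = 0x4b00cdef as f32 == 0x1.0p23 + 0xcdef
//   hi = 0x530089ab as f32 == 0x1.0p39 + 0x89ab * 0x1.0p16
//   fhi = hi - (0x1.0p39 + 0x1.0p23) == 0x89ab * 0x1.0p16 - 0x1.0p23
//   lo + fhi == 0x89ab * 0x1.0p16 + 0xcdef == (float)0x89abcdef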
18693 // We shouldn't use it when unsafe-fp-math is enabled though: we might later
18694 // reassociate the two FADDs, and if we do that, the algorithm fails
18695 // spectacularly (PR24512).
18696 // FIXME: If we ever have some kind of Machine FMF, this should be marked
18697 // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
18698 // there's also the MachineCombiner reassociations happening on Machine IR.
18699 if (DAG.getTarget().Options.UnsafeFPMath)
18703 SDValue V = Op->getOperand(0);
18704 MVT VecIntVT = V.getSimpleValueType();
18705 bool Is128 = VecIntVT == MVT::v4i32;
18706 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
18707 // If we convert to something other than the supported type, e.g., to v4f64,
18708 // don't use this lowering.
18709 if (VecFloatVT != Op->getSimpleValueType(0))
18712 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
18713 "Unsupported custom type");
18715 // In the #ifdef/#else code, we have in common:
18716 // - The vector of constants:
18722 // Create the splat vector for 0x4b000000.
18723 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
18724 // Create the splat vector for 0x53000000.
18725 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
18727 // Create the right shift.
18728 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
18729 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
18732 if (Subtarget.hasSSE41()) {
18733 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
18734 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18735 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
18736 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
18737 // Low will be bitcasted right away, so do not bother bitcasting back to its
18738 // original type.
18739 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
18740 VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
18741 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18742 // (uint4) 0x53000000, 0xaa);
18743 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
18744 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
18745 // High will be bitcasted right away, so do not bother bitcasting back to
18746 // its original type.
18747 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
18748 VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
18750 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
18751 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18752 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
18753 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
18755 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18756 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
18759 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
18760 SDValue VecCstFAdd = DAG.getConstantFP(
18761 APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
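// Note (illustrative): 0xD3000080 is the IEEE-754 f32 bit pattern for
// -(0x1.0p39f + 0x1.0p23f): sign bit set, biased exponent 0xA6 (2^39), and
// mantissa bit 7 contributing the 2^23 term, so the FADD below subtracts
// both biases at once.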
18763 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18764 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
18765 // TODO: Are there any fast-math-flags to propagate here?
18767 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
18768 // return (float4) lo + fhi;
18769 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
18770 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
18773 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
18774 const X86Subtarget &Subtarget) {
18775 SDValue N0 = Op.getOperand(0);
18776 MVT SrcVT = N0.getSimpleValueType();
18779 switch (SrcVT.SimpleTy) {
18781 llvm_unreachable("Custom UINT_TO_FP is not supported!");
18783 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
18786 assert(!Subtarget.hasAVX512());
18787 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
18791 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
18792 SelectionDAG &DAG) const {
18793 SDValue N0 = Op.getOperand(0);
18795 auto PtrVT = getPointerTy(DAG.getDataLayout());
18796 MVT SrcVT = N0.getSimpleValueType();
18797 MVT DstVT = Op.getSimpleValueType();
18799 if (DstVT == MVT::f128)
18800 return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
18802 if (DstVT.isVector())
18803 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
18805 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18808 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
18809 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
18810 // Conversions from unsigned i32 to f32/f64 are legal,
18811 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
18815 // Promote i32 to i64 and use a signed conversion on 64-bit targets.
18816 if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
18817 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, N0);
18818 return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, N0);
18821 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18824 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
18825 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
18826 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
18827 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
18828 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
18831 // Make a 64-bit buffer, and use it to build an FILD.
18832 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
18833 if (SrcVT == MVT::i32) {
18834 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
18835 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
18836 StackSlot, MachinePointerInfo());
18837 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
18838 OffsetSlot, MachinePointerInfo());
18839 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
18843 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
18844 SDValue ValueToStore = Op.getOperand(0);
18845 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
18846 // Bitcasting to f64 here allows us to do a single 64-bit store from
18847 // an SSE register, avoiding the store forwarding penalty that would come
18848 // with two 32-bit stores.
18849 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18850 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18851 MachinePointerInfo());
18852 // For i64 source, we need to add the appropriate power of 2 if the input
18853 // was negative. This is the same as the optimization in
18854 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
18855 // we must be careful to do the computation in x87 extended precision, not
18856 // in SSE. (The generic code can't know it's OK to do this, or how to.)
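// For example (illustrative): x = 0xFFFFFFFFFFFFFFFF is read by FILD as the
// signed value -1; adding the 2^64 fudge factor selected below yields
// 18446744073709551615.0, the correct unsigned interpretation, which is
// exact in x87 extended precision.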
18857 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
18858 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
18859 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18860 MachineMemOperand::MOLoad, 8, 8);
18862 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
18863 SDValue Ops[] = { Store, StackSlot };
18864 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
18867 APInt FF(32, 0x5F800000ULL);
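// 0x5F800000 is the f32 bit pattern of 0x1.0p64 (biased exponent
// 127 + 64 = 0xBF): the fudge factor added back for negative inputs.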
18869 // Check whether the sign bit is set.
18870 SDValue SignSet = DAG.getSetCC(
18871 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
18872 Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
18874 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
18875 SDValue FudgePtr = DAG.getConstantPool(
18876 ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
18878 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
18879 SDValue Zero = DAG.getIntPtrConstant(0, dl);
18880 SDValue Four = DAG.getIntPtrConstant(4, dl);
18881 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
18882 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
18884 // Load the value out, extending it from f32 to f80.
18885 // FIXME: Avoid the extend by constructing the right constant pool?
18886 SDValue Fudge = DAG.getExtLoad(
18887 ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
18888 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
18889 /* Alignment = */ 4);
18890 // Extend everything to 80 bits to force it to be done on x87.
18891 // TODO: Are there any fast-math-flags to propagate here?
18892 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
18893 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
18894 DAG.getIntPtrConstant(0, dl));
18897 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
18898 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
18899 // just return an SDValue().
18900 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
18901 // to i16, i32 or i64, and we lower it to a legal sequence and return the
18902 // result.
18903 SDValue
18904 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
18905 bool IsSigned) const {
18908 EVT DstTy = Op.getValueType();
18909 EVT TheVT = Op.getOperand(0).getValueType();
18910 auto PtrVT = getPointerTy(DAG.getDataLayout());
18912 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
18913 // f16 must be promoted before using the lowering in this routine.
18914 // fp128 does not use this lowering.
18918 // If using FIST to compute an unsigned i64, we'll need some fixup
18919 // to handle values above the maximum signed i64. A FIST is always
18920 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
18921 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
18923 if (!IsSigned && DstTy != MVT::i64) {
18924 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
18925 // The low 32 bits of the fist result will have the correct uint32 result.
18926 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
18930 assert(DstTy.getSimpleVT() <= MVT::i64 &&
18931 DstTy.getSimpleVT() >= MVT::i16 &&
18932 "Unknown FP_TO_INT to lower!");
18934 // We lower FP->int64 into FISTP64 followed by a load from a temporary
18936 MachineFunction &MF = DAG.getMachineFunction();
18937 unsigned MemSize = DstTy.getStoreSize();
18938 int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
18939 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18941 SDValue Chain = DAG.getEntryNode();
18942 SDValue Value = Op.getOperand(0);
18943 SDValue Adjust; // 0x0 or 0x8000000000000000, for result sign bit adjustment.
18945 if (UnsignedFixup) {
18947 // Conversion to unsigned i64 is implemented with a select,
18948 // depending on whether the source value fits in the range
18949 // of a signed i64. Let Thresh be the FP equivalent of
18950 // 0x8000000000000000ULL.
18952 // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
18953 // FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
18954 // Fist-to-mem64 FistSrc
18955 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
18956 // to XOR'ing the high 32 bits with Adjust.
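// Numeric sketch (illustrative): for Value == 0x1.0p63 exactly,
// FistSrc == Value - Thresh == 0.0, the FIST stores 0, and XOR'ing with
// Adjust (0x8000000000000000) produces the correct u64 result 2^63.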
18958 // Being a power of 2, Thresh is exactly representable in all FP formats.
18959 // For X87 we'd like to use the smallest FP type for this constant, but
18960 // for DAG type consistency we have to match the FP operand type.
18962 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
18963 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
18964 bool LosesInfo = false;
18965 if (TheVT == MVT::f64)
18966 // The rounding mode is irrelevant as the conversion should be exact.
18967 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
18969 else if (TheVT == MVT::f80)
18970 Status = Thresh.convert(APFloat::x87DoubleExtended(),
18971 APFloat::rmNearestTiesToEven, &LosesInfo);
18973 assert(Status == APFloat::opOK && !LosesInfo &&
18974 "FP conversion should have been exact");
18976 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
18978 SDValue Cmp = DAG.getSetCC(DL,
18979 getSetCCResultType(DAG.getDataLayout(),
18980 *DAG.getContext(), TheVT),
18981 Value, ThreshVal, ISD::SETLT);
18982 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
18983 DAG.getConstant(0, DL, MVT::i64),
18984 DAG.getConstant(APInt::getSignMask(64),
18986 SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
18987 Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
18988 *DAG.getContext(), TheVT),
18989 Value, ThreshVal, ISD::SETLT);
18990 Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
18993 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
18995 // FIXME This causes a redundant load/store if the SSE-class value is already
18996 // in memory, such as if it is on the callstack.
18997 if (isScalarFPTypeInSSEReg(TheVT)) {
18998 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
18999 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19000 SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
19001 SDValue Ops[] = { Chain, StackSlot };
19003 unsigned FLDSize = TheVT.getStoreSize();
19004 assert(FLDSize <= MemSize && "Stack slot not big enough");
19005 MachineMemOperand *MMO = MF.getMachineMemOperand(
19006 MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
19007 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19008 Chain = Value.getValue(1);
19011 // Build the FP_TO_INT*_IN_MEM
19012 MachineMemOperand *MMO = MF.getMachineMemOperand(
19013 MPI, MachineMemOperand::MOStore, MemSize, MemSize);
19014 SDValue Ops[] = { Chain, Value, StackSlot };
19015 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19016 DAG.getVTList(MVT::Other),
19019 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19021 // If we need an unsigned fixup, XOR the result with adjust.
19023 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19028 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19029 const X86Subtarget &Subtarget) {
19030 MVT VT = Op.getSimpleValueType();
19031 SDValue In = Op.getOperand(0);
19032 MVT InVT = In.getSimpleValueType();
19034 unsigned Opc = Op.getOpcode();
19036 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19037 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19038 "Unexpected extension opcode");
19039 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19040 "Expected same number of elements");
19041 assert((VT.getVectorElementType() == MVT::i16 ||
19042 VT.getVectorElementType() == MVT::i32 ||
19043 VT.getVectorElementType() == MVT::i64) &&
19044 "Unexpected element type");
19045 assert((InVT.getVectorElementType() == MVT::i8 ||
19046 InVT.getVectorElementType() == MVT::i16 ||
19047 InVT.getVectorElementType() == MVT::i32) &&
19048 "Unexpected element type");
19050 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
19052 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
19053 if (InVT == MVT::v8i8) {
19054 if (VT != MVT::v8i64)
19057 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
19058 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
19059 return DAG.getNode(ExtendInVecOpc, dl, VT, In);
19062 if (Subtarget.hasInt256())
19065 // Optimize vectors in AVX mode:
19068 // Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
19069 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
19070 // Concat upper and lower parts.
19073 // Use vpmovzdq for 4 lower elements v4i32 -> v2i64.
19074 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
19075 // Concat upper and lower parts.
19077 MVT HalfVT = VT.getHalfNumVectorElementsVT();
19078 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
19080 // Short-circuit if we can determine that each 128-bit half is the same value.
19081 // Otherwise, this is difficult to match and optimize.
19082 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
19083 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
19084 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
19086 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
19087 SDValue Undef = DAG.getUNDEF(InVT);
19088 bool NeedZero = Opc == ISD::ZERO_EXTEND;
19089 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
19090 OpHi = DAG.getBitcast(HalfVT, OpHi);
19092 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
19095 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
19096 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
19097 const SDLoc &dl, SelectionDAG &DAG) {
19098 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
19099 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19100 DAG.getIntPtrConstant(0, dl));
19101 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19102 DAG.getIntPtrConstant(8, dl));
19103 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
19104 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
19105 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
19106 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19109 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
19110 const X86Subtarget &Subtarget,
19111 SelectionDAG &DAG) {
19112 MVT VT = Op->getSimpleValueType(0);
19113 SDValue In = Op->getOperand(0);
19114 MVT InVT = In.getSimpleValueType();
19115 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
19117 unsigned NumElts = VT.getVectorNumElements();
19119 // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
19120 // avoids a constant pool load.
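// Sketch (illustrative): sign_extend of an i1 lane gives 0 or all-ones, so
//   zext(x) == srl(sext(x), EltBits - 1)
// e.g. for i32 lanes: (0xFFFFFFFF >> 31) == 1 and (0x0 >> 31) == 0.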
19121 if (VT.getVectorElementType() != MVT::i8) {
19122 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
19123 return DAG.getNode(ISD::SRL, DL, VT, Extend,
19124 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
19127 // Extend VT if BWI is not supported.
19129 if (!Subtarget.hasBWI()) {
19130 // If v16i32 is to be avoided, we'll need to split and concatenate.
19131 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
19132 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
19134 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
19137 // Widen to 512-bits if VLX is not supported.
19138 MVT WideVT = ExtVT;
19139 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
19140 NumElts *= 512 / ExtVT.getSizeInBits();
19141 InVT = MVT::getVectorVT(MVT::i1, NumElts);
19142 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
19143 In, DAG.getIntPtrConstant(0, DL));
19144 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
19148 SDValue One = DAG.getConstant(1, DL, WideVT);
19149 SDValue Zero = DAG.getConstant(0, DL, WideVT);
19151 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
19153 // Truncate if we had to extend above.
19155 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
19156 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
19159 // Extract back to 128/256-bit if we widened.
19161 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
19162 DAG.getIntPtrConstant(0, DL));
19164 return SelectedVal;
19167 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
19168 SelectionDAG &DAG) {
19169 SDValue In = Op.getOperand(0);
19170 MVT SVT = In.getSimpleValueType();
19172 if (SVT.getVectorElementType() == MVT::i1)
19173 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
19175 assert(Subtarget.hasAVX() && "Expected AVX support");
19176 return LowerAVXExtend(Op, DAG, Subtarget);
19179 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
19180 /// It makes use of the fact that vectors with enough leading sign/zero bits
19181 /// prevent the PACKSS/PACKUS from saturating the results.
19182 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
19183 /// within each 128-bit lane.
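/// For example (illustrative): a v8i32 source whose elements are already
/// sign-extended i16 values has >= 17 sign bits per element, so a single
/// PACKSSDW of its two 128-bit halves yields the v8i16 truncation without
/// saturation.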
19184 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
19185 const SDLoc &DL, SelectionDAG &DAG,
19186 const X86Subtarget &Subtarget) {
19187 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
19188 "Unexpected PACK opcode");
19189 assert(DstVT.isVector() && "VT not a vector?");
19191 // Requires SSE2 but AVX512 has fast vector truncate.
19192 if (!Subtarget.hasSSE2())
19195 EVT SrcVT = In.getValueType();
19197 // No truncation required, we might get here due to recursive calls.
19198 if (SrcVT == DstVT)
19201 // We only support vector truncation to 64 bits or greater from a
19202 // source of 128 bits or greater.
19203 unsigned DstSizeInBits = DstVT.getSizeInBits();
19204 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
19205 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
19208 unsigned NumElems = SrcVT.getVectorNumElements();
19209 if (!isPowerOf2_32(NumElems))
19212 LLVMContext &Ctx = *DAG.getContext();
19213 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
19214 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
19216 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
19218 // Pack to the largest type possible:
19219 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
19220 EVT InVT = MVT::i16, OutVT = MVT::i8;
19221 if (SrcVT.getScalarSizeInBits() > 16 &&
19222 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
19227 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
19228 if (SrcVT.is128BitVector()) {
19229 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
19230 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
19231 In = DAG.getBitcast(InVT, In);
19232 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
19233 Res = extractSubVector(Res, 0, DAG, DL, 64);
19234 return DAG.getBitcast(DstVT, Res);
19237 // Extract lower/upper subvectors.
19238 unsigned NumSubElts = NumElems / 2;
19239 SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19240 SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19242 unsigned SubSizeInBits = SrcSizeInBits / 2;
19243 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
19244 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
19246 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
19247 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
19248 Lo = DAG.getBitcast(InVT, Lo);
19249 Hi = DAG.getBitcast(InVT, Hi);
19250 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19251 return DAG.getBitcast(DstVT, Res);
19254 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
19255 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
19256 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
19257 Lo = DAG.getBitcast(InVT, Lo);
19258 Hi = DAG.getBitcast(InVT, Hi);
19259 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19261 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
19262 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
19263 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
19264 SmallVector<int, 64> Mask;
19265 int Scale = 64 / OutVT.getScalarSizeInBits();
19266 scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
19267 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
19269 if (DstVT.is256BitVector())
19270 return DAG.getBitcast(DstVT, Res);
19272 // If 512bit -> 128bit truncate another stage.
19273 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19274 Res = DAG.getBitcast(PackedVT, Res);
19275 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19278 // Recursively pack lower/upper subvectors, concat result and pack again.
19279 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
19280 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
19281 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
19282 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
19284 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19285 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
19286 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19289 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
19290 const X86Subtarget &Subtarget) {
19293 MVT VT = Op.getSimpleValueType();
19294 SDValue In = Op.getOperand(0);
19295 MVT InVT = In.getSimpleValueType();
19297 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
19299 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
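// e.g. (illustrative) for v16i8 with BWI: shifting each byte left by 7 moves
// bit 0 into the byte's sign position, and VPMOVB2M then gathers the sign
// bits into a 16-bit mask register.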
19300 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
19301 if (InVT.getScalarSizeInBits() <= 16) {
19302 if (Subtarget.hasBWI()) {
19303 // legal, will go to VPMOVB2M, VPMOVW2M
19304 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19305 // We need to shift to get the lsb into sign position.
19306 // Shifting packed bytes is not supported natively, so bitcast to words.
19307 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
19308 In = DAG.getNode(ISD::SHL, DL, ExtVT,
19309 DAG.getBitcast(ExtVT, In),
19310 DAG.getConstant(ShiftInx, DL, ExtVT));
19311 In = DAG.getBitcast(InVT, In);
19313 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
19316 // Use TESTD/Q, extending the vector to packed dword/qword.
19317 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
19318 "Unexpected vector type.");
19319 unsigned NumElts = InVT.getVectorNumElements();
19320 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
19321 // We need to change to a wider element type that we have support for.
19322 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
19323 // For 16 element vectors we extend to v16i32 unless we are explicitly
19324 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
19325 // we need to split into two 8 element vectors which we can extend to v8i32,
19326 // truncate and concat the results. There's an additional complication if
19327 // the original type is v16i8. In that case we can't split the v16i8 so
19328 // first we pre-extend it to v16i16 which we can split to v8i16, then extend
19329 // to v8i32, truncate that to v8i1 and concat the two halves.
19330 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
19331 if (InVT == MVT::v16i8) {
19332 // First we need to sign extend up to 256-bits so we can split that.
19333 InVT = MVT::v16i16;
19334 In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
19336 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
19337 SDValue Hi = extract128BitVector(In, 8, DAG, DL);
19338 // We're split now, just emit two truncates and a concat. The two
19339 // truncates will trigger legalization to come back to this function.
19340 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
19341 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
19342 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19344 // We either have 8 elements or we're allowed to use 512-bit vectors.
19345 // If we have VLX, we want to use the narrowest vector that can get the
19346 // job done so we use vXi32.
19347 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
19348 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
19349 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
19351 ShiftInx = InVT.getScalarSizeInBits() - 1;
19354 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19355 // We need to shift to get the lsb into sign position.
19356 In = DAG.getNode(ISD::SHL, DL, InVT, In,
19357 DAG.getConstant(ShiftInx, DL, InVT));
19359 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
19360 if (Subtarget.hasDQI())
19361 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
19362 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
19365 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
19367 MVT VT = Op.getSimpleValueType();
19368 SDValue In = Op.getOperand(0);
19369 MVT InVT = In.getSimpleValueType();
19370 unsigned InNumEltBits = InVT.getScalarSizeInBits();
19372 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19373 "Invalid TRUNCATE operation");
19375 // If we're called by the type legalizer, handle a few cases.
19376 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19377 if (!TLI.isTypeLegal(InVT)) {
19378 if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
19379 VT.is128BitVector()) {
19380 assert(Subtarget.hasVLX() && "Unexpected subtarget!");
19381 // The default behavior is to truncate one step, concatenate, and then
19382 // truncate the remainder. We'd rather produce two 64-bit results and
19383 // concatenate those.
19385 std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
19388 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
19390 Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
19391 Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
19392 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19395 // Otherwise let default legalization handle it.
19399 if (VT.getVectorElementType() == MVT::i1)
19400 return LowerTruncateVecI1(Op, DAG, Subtarget);
19402 // vpmovqb/w/d, vpmovdb/w, vpmovwb
19403 if (Subtarget.hasAVX512()) {
19404 // Word to byte only under BWI. Otherwise we have to promote to v16i32
19405 // and then truncate that. But we should only do that if we haven't been
19406 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
19407 // handled by isel patterns.
19408 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
19409 Subtarget.canExtendTo512DQ())
19413 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
19414 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
19416 // Truncate with PACKUS if we are truncating a vector with leading zero bits
19417 // that extend all the way to the packed/truncated value.
19418 // Pre-SSE41 we can only use PACKUSWB.
19419 KnownBits Known = DAG.computeKnownBits(In);
19420 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
19422 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
19425 // Truncate with PACKSS if we are truncating a vector with sign-bits that
19426 // extend all the way to the packed/truncated value.
19427 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
19429 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
19432 // Handle truncation of V256 to V128 using shuffles.
19433 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19435 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
19436 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
19437 if (Subtarget.hasInt256()) {
19438 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
19439 In = DAG.getBitcast(MVT::v8i32, In);
19440 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
19441 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
19442 DAG.getIntPtrConstant(0, DL));
19445 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19446 DAG.getIntPtrConstant(0, DL));
19447 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19448 DAG.getIntPtrConstant(2, DL));
19449 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19450 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19451 static const int ShufMask[] = {0, 2, 4, 6};
19452 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
19455 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
19456 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
19457 if (Subtarget.hasInt256()) {
19458 In = DAG.getBitcast(MVT::v32i8, In);
19460 // The PSHUFB mask:
19461 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
19462 -1, -1, -1, -1, -1, -1, -1, -1,
19463 16, 17, 20, 21, 24, 25, 28, 29,
19464 -1, -1, -1, -1, -1, -1, -1, -1 };
19465 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
19466 In = DAG.getBitcast(MVT::v4i64, In);
19468 static const int ShufMask2[] = {0, 2, -1, -1};
19469 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
19470 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19471 DAG.getIntPtrConstant(0, DL));
19472 return DAG.getBitcast(VT, In);
19475 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
19476 DAG.getIntPtrConstant(0, DL));
19478 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
19479 DAG.getIntPtrConstant(4, DL));
19481 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
19482 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
19484 // The PSHUFB mask:
19485 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
19486 -1, -1, -1, -1, -1, -1, -1, -1};
19488 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
19489 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
19491 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19492 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19494 // The MOVLHPS Mask:
19495 static const int ShufMask2[] = {0, 1, 4, 5};
19496 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
19497 return DAG.getBitcast(MVT::v8i16, res);
19500 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
19501 // Use an AND to zero upper bits for PACKUS.
19502 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
19504 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19505 DAG.getIntPtrConstant(0, DL));
19506 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19507 DAG.getIntPtrConstant(8, DL));
19508 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
19511 llvm_unreachable("All 256->128 cases should have been handled above!");
19514 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
19515 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
19516 MVT VT = Op.getSimpleValueType();
19517 SDValue Src = Op.getOperand(0);
19518 MVT SrcVT = Src.getSimpleValueType();
19521 if (SrcVT == MVT::f128) {
19523 if (Op.getOpcode() == ISD::FP_TO_SINT)
19524 LC = RTLIB::getFPTOSINT(SrcVT, VT);
19526 LC = RTLIB::getFPTOUINT(SrcVT, VT);
19528 MakeLibCallOptions CallOptions;
19529 return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
19532 if (VT.isVector()) {
19533 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
19534 MVT ResVT = MVT::v4i32;
19535 MVT TruncVT = MVT::v4i1;
19536 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
19537 if (!IsSigned && !Subtarget.hasVLX()) {
19538 // Widen to 512-bits.
19539 ResVT = MVT::v8i32;
19540 TruncVT = MVT::v8i1;
19541 Opc = ISD::FP_TO_UINT;
19542 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
19543 DAG.getUNDEF(MVT::v8f64),
19544 Src, DAG.getIntPtrConstant(0, dl));
19546 SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
19547 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
19548 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
19549 DAG.getIntPtrConstant(0, dl));
19552 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
19553 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
19554 return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
19555 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
19556 DAG.getUNDEF(MVT::v2f32)));
19562 assert(!VT.isVector());
19564 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
19566 if (!IsSigned && UseSSEReg) {
19567 // Conversions from f32/f64 with AVX512 should be legal.
19568 if (Subtarget.hasAVX512())
19571 // Use default expansion for i64.
19572 if (VT == MVT::i64)
19575 assert(VT == MVT::i32 && "Unexpected VT!");
19577 // Promote i32 to i64 and use a signed operation on 64-bit targets.
19578 if (Subtarget.is64Bit()) {
19579 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
19580 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19583 // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
19584 // use fisttp which will be handled later.
19585 if (!Subtarget.hasSSE3())
19589 // Promote i16 to i32 if we can use a SSE operation.
19590 if (VT == MVT::i16 && UseSSEReg) {
19591 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
19592 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
19593 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19596 // If this is a FP_TO_SINT using SSEReg we're done.
19597 if (UseSSEReg && IsSigned)
19600 // Fall back to X87.
19601 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
19604 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
19607 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
19609 MVT VT = Op.getSimpleValueType();
19610 SDValue In = Op.getOperand(0);
19611 MVT SVT = In.getSimpleValueType();
19613 if (VT == MVT::f128) {
19614 RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
19615 return LowerF128Call(Op, DAG, LC);
19618 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
19620 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
19621 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
19622 In, DAG.getUNDEF(SVT)));
19625 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
19626 MVT VT = Op.getSimpleValueType();
19627 SDValue In = Op.getOperand(0);
19628 MVT SVT = In.getSimpleValueType();
19630 // It's legal except when f128 is involved
19631 if (SVT != MVT::f128)
19634 RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
19636 // FP_ROUND node has a second operand indicating whether it is known to be
19637 // precise. That doesn't take part in the LibCall so we can't directly use
19638 // LowerF128Call.
19639 MakeLibCallOptions CallOptions;
19640 return makeLibCall(DAG, LC, VT, In, CallOptions, SDLoc(Op)).first;
19643 // FIXME: This is a hack to allow FP_ROUND to be marked Custom without breaking
19644 // the default expansion of STRICT_FP_ROUND.
19645 static SDValue LowerSTRICT_FP_ROUND(SDValue Op, SelectionDAG &DAG) {
19646 // FIXME: Need to form a libcall with an input chain for f128.
19647 assert(Op.getOperand(0).getValueType() != MVT::f128 &&
19648 "Don't know how to handle f128 yet!");
19652 /// Depending on uarch and/or optimizing for size, we might prefer to use a
19653 /// vector operation in place of the typical scalar operation.
19654 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
19655 const X86Subtarget &Subtarget) {
19656 // If both operands have other uses, this is probably not profitable.
19657 SDValue LHS = Op.getOperand(0);
19658 SDValue RHS = Op.getOperand(1);
19659 if (!LHS.hasOneUse() && !RHS.hasOneUse())
19662 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
19663 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
19664 if (IsFP && !Subtarget.hasSSE3())
19666 if (!IsFP && !Subtarget.hasSSSE3())
19669 // Extract from a common vector.
19670 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19671 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19672 LHS.getOperand(0) != RHS.getOperand(0) ||
19673 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
19674 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
19675 !shouldUseHorizontalOp(true, DAG, Subtarget))
19678 // Allow commuted 'hadd' ops.
19679 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
19681 switch (Op.getOpcode()) {
19682 case ISD::ADD: HOpcode = X86ISD::HADD; break;
19683 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
19684 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
19685 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
19687 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
19689 unsigned LExtIndex = LHS.getConstantOperandVal(1);
19690 unsigned RExtIndex = RHS.getConstantOperandVal(1);
19691 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
19692 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
19693 std::swap(LExtIndex, RExtIndex);
19695 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
19698 SDValue X = LHS.getOperand(0);
19699 EVT VecVT = X.getValueType();
19700 unsigned BitWidth = VecVT.getSizeInBits();
19701 unsigned NumLanes = BitWidth / 128;
19702 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
19703 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
19704 "Not expecting illegal vector widths here");
19706 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
19707 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
19709 if (BitWidth == 256 || BitWidth == 512) {
19710 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
19711 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
19712 LExtIndex %= NumEltsPerLane;
19715 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
19716 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
19717 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
19718 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
19719 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
19720 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
19721 DAG.getIntPtrConstant(LExtIndex / 2, DL));
19724 /// Depending on uarch and/or optimizing for size, we might prefer to use a
19725 /// vector operation in place of the typical scalar operation.
19726 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
19727 if (Op.getValueType() == MVT::f128) {
19728 RTLIB::Libcall LC = Op.getOpcode() == ISD::FADD ? RTLIB::ADD_F128
19730 return LowerF128Call(Op, DAG, LC);
19733 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
19734 "Only expecting float/double");
19735 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
19738 /// The only differences between FABS and FNEG are the mask and the logic op.
19739 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
19740 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
19741 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
19742 "Wrong opcode for lowering FABS or FNEG.");
19744 bool IsFABS = (Op.getOpcode() == ISD::FABS);
19746 // If this is a FABS and it has an FNEG user, bail out to fold the combination
19747 // into an FNABS. We'll lower the FABS after that if it is still in use.
19749 for (SDNode *User : Op->uses())
19750 if (User->getOpcode() == ISD::FNEG)
19754 MVT VT = Op.getSimpleValueType();
19756 bool IsF128 = (VT == MVT::f128);
19757 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19758 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19759 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19760 "Unexpected type in LowerFABSorFNEG");
19762 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
19763 // decide if we should generate a 16-byte constant mask when we only need 4 or
19764 // 8 bytes for the scalar case.
19766 // There are no scalar bitwise logical SSE/AVX instructions, so we
19767 // generate a 16-byte vector constant and logic op even for the scalar case.
19768 // Using a 16-byte mask allows folding the load of the mask with
19769 // the logic op, so it can save (~4 bytes) on code size.
19770 bool IsFakeVector = !VT.isVector() && !IsF128;
19773 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19775 unsigned EltBits = VT.getScalarSizeInBits();
19776 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
19777 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
19778 APInt::getSignMask(EltBits);
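// e.g. (illustrative) for f32: FABS uses the mask 0x7FFFFFFF (FAND clears
// the sign bit) and FNEG uses 0x80000000 (FXOR flips it).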
19779 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19780 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
19782 SDValue Op0 = Op.getOperand(0);
19783 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
19784 unsigned LogicOp = IsFABS ? X86ISD::FAND :
19785 IsFNABS ? X86ISD::FOR :
19787 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
19789 if (VT.isVector() || IsF128)
19790 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19792 // For the scalar case extend to a 128-bit vector, perform the logic op,
19793 // and extract the scalar result back out.
19794 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
19795 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19796 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
19797 DAG.getIntPtrConstant(0, dl));
19800 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
19801 SDValue Mag = Op.getOperand(0);
19802 SDValue Sign = Op.getOperand(1);
19805 // If the sign operand is smaller, extend it first.
19806 MVT VT = Op.getSimpleValueType();
19807 if (Sign.getSimpleValueType().bitsLT(VT))
19808 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
19810 // And if it is bigger, shrink it first.
19811 if (Sign.getSimpleValueType().bitsGT(VT))
19812 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
19814 // At this point the operands and the result should have the same
19815 // type, and that won't be f80 since that is not custom lowered.
19816 bool IsF128 = (VT == MVT::f128);
19817 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19818 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19819 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19820 "Unexpected type in LowerFCOPYSIGN");
19822 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19824 // Perform all scalar logic operations as 16-byte vectors because there are no
19825 // scalar FP logic instructions in SSE.
19826 // TODO: This isn't necessary. If we used scalar types, we might avoid some
19827 // unnecessary splats, but we might miss load folding opportunities. Should
19828 // this decision be based on OptimizeForSize?
19829 bool IsFakeVector = !VT.isVector() && !IsF128;
19832 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19834 // The mask constants are automatically splatted for vector types.
19835 unsigned EltSizeInBits = VT.getScalarSizeInBits();
19836 SDValue SignMask = DAG.getConstantFP(
19837 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
19838 SDValue MagMask = DAG.getConstantFP(
19839 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
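// i.e. (illustrative) copysign(mag, sign) == (mag & ~SignBit) | (sign & SignBit);
// for f64 the masks are 0x7FFFFFFFFFFFFFFF and 0x8000000000000000.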
19841 // First, clear all bits but the sign bit from the second operand (sign).
19843 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
19844 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
19846 // Next, clear the sign bit from the first operand (magnitude).
19847 // TODO: If we had general constant folding for FP logic ops, this check
19848 // wouldn't be necessary.
19850 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
19851 APFloat APF = Op0CN->getValueAPF();
19853 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
19855 // If the magnitude operand wasn't a constant, we need to AND out the sign.
19857 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
19858 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
19861 // OR the magnitude value with the sign bit.
19862 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
19863 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
19864 DAG.getIntPtrConstant(0, dl));
19867 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
19868 SDValue N0 = Op.getOperand(0);
19869 SDLoc dl(Op);
19870 MVT VT = Op.getSimpleValueType();
19872 MVT OpVT = N0.getSimpleValueType();
19873 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
19874 "Unexpected type for FGETSIGN");
19876 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
19877 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
19878 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
19879 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
19880 Res = DAG.getZExtOrTrunc(Res, dl, VT);
19881 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
19882 return Res;
19883 }
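// Illustrative example (assuming an f32 input): FGETSIGN %x is lowered as
//   t0 = scalar_to_vector %x          ; v4f32
//   t1 = X86ISD::MOVMSK t0            ; sign bits of all lanes into i32
//   %r = and (zext/trunc t1), 1       ; keep only lane 0's sign bit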
19885 /// Helper for creating a X86ISD::SETCC node.
19886 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
19887 SelectionDAG &DAG) {
19888 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19889 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
19890 }
19892 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
19893 /// style scalarized (associative) reduction patterns.
19894 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
19895 SmallVectorImpl<SDValue> &SrcOps) {
19896 SmallVector<SDValue, 8> Opnds;
19897 DenseMap<SDValue, APInt> SrcOpMap;
19898 EVT VT = MVT::Other;
19900 // Recognize a special case where a vector is casted into wide integer to
19901 // test all 0s.
19902 assert(Op.getOpcode() == unsigned(BinOp) &&
19903 "Unexpected bit reduction opcode");
19904 Opnds.push_back(Op.getOperand(0));
19905 Opnds.push_back(Op.getOperand(1));
19907 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
19908 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
19909 // BFS traverse all BinOp operands.
19910 if (I->getOpcode() == unsigned(BinOp)) {
19911 Opnds.push_back(I->getOperand(0));
19912 Opnds.push_back(I->getOperand(1));
19913 // Re-evaluate the number of nodes to be traversed.
19914 e += 2; // 2 more nodes (LHS and RHS) are pushed.
19915 continue;
19916 }
19918 // Quit if a non-EXTRACT_VECTOR_ELT is found.
19919 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19920 return false;
19922 // Quit if without a constant index.
19923 SDValue Idx = I->getOperand(1);
19924 if (!isa<ConstantSDNode>(Idx))
19925 return false;
19927 SDValue Src = I->getOperand(0);
19928 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
19929 if (M == SrcOpMap.end()) {
19930 VT = Src.getValueType();
19931 // Quit if not the same type.
19932 if (SrcOpMap.begin() != SrcOpMap.end() &&
19933 VT != SrcOpMap.begin()->first.getValueType())
19934 return false;
19935 unsigned NumElts = VT.getVectorNumElements();
19936 APInt EltCount = APInt::getNullValue(NumElts);
19937 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
19938 SrcOps.push_back(Src);
19939 }
19940 // Quit if element already used.
19941 unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
19942 if (M->second[CIdx])
19943 return false;
19944 M->second.setBit(CIdx);
19945 }
19947 // Quit if not all elements are used.
19948 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
19949 E = SrcOpMap.end();
19950 I != E; ++I)
19951 if (!I->second.isAllOnesValue())
19952 return false;
19954 return true;
19955 }
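// Worked example of the recognizer above (illustrative only): for
//   or (extractelt v4i32 %X, 0),
//      (or (extractelt %X, 1), (or (extractelt %X, 2), (extractelt %X, 3)))
// every lane of %X is used exactly once, so SrcOps ends up as { %X } and the
// function returns true; a repeated or missing lane makes it bail out.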
19958 // Check whether an OR'd tree is PTEST-able.
19959 static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
19960 const X86Subtarget &Subtarget,
19961 SelectionDAG &DAG, SDValue &X86CC) {
19962 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
19964 if (!Subtarget.hasSSE41() || !Op->hasOneUse())
19965 return SDValue();
19967 SmallVector<SDValue, 8> VecIns;
19968 if (!matchScalarReduction(Op, ISD::OR, VecIns))
19969 return SDValue();
19971 // Quit if not 128/256-bit vector.
19972 EVT VT = VecIns[0].getValueType();
19973 if (!VT.is128BitVector() && !VT.is256BitVector())
19974 return SDValue();
19976 SDLoc DL(Op);
19977 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
19979 // Cast all vectors into TestVT for PTEST.
19980 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
19981 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
19983 // If more than one full vector is evaluated, OR them first before PTEST.
19984 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
19985 // Each iteration will OR 2 nodes and append the result until there is only
19986 // 1 node left, i.e. the final OR'd value of all vectors.
19987 SDValue LHS = VecIns[Slot];
19988 SDValue RHS = VecIns[Slot + 1];
19989 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
19990 }
19992 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
19993 DL, MVT::i8);
19994 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
19995 }
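// Illustrative example (not in the original file): the scalarized reduction
//   (or (extractelt v2i64 %X, 0), (extractelt %X, 1)) seteq 0
// becomes a single "ptest %X, %X" whose ZF is read with COND_E, instead of
// extracting both lanes and OR'ing them in general-purpose registers.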
19997 /// Return true if \c Op has a use that doesn't just read flags.
19998 static bool hasNonFlagsUse(SDValue Op) {
19999 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
20000 ++UI) {
20001 SDNode *User = *UI;
20001 SDNode *User = *UI;
20002 unsigned UOpNo = UI.getOperandNo();
20003 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
20004 // Look past the truncate.
20005 UOpNo = User->use_begin().getOperandNo();
20006 User = *User->use_begin();
20007 }
20009 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
20010 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
20011 return true;
20012 }
20013 return false;
20014 }
20016 /// Emit nodes that will be selected as "test Op0,Op0", or something
20017 /// equivalent.
20018 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
20019 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
20020 // CF and OF aren't always set the way we want. Determine which
20021 // of these we need.
20022 bool NeedCF = false;
20023 bool NeedOF = false;
20024 switch (X86CC) {
20025 default: break;
20026 case X86::COND_A: case X86::COND_AE:
20027 case X86::COND_B: case X86::COND_BE:
20028 NeedCF = true;
20029 break;
20030 case X86::COND_G: case X86::COND_GE:
20031 case X86::COND_L: case X86::COND_LE:
20032 case X86::COND_O: case X86::COND_NO: {
20033 // Check if we really need to set the
20034 // Overflow flag. If NoSignedWrap is present
20035 // that is not actually needed.
20036 switch (Op->getOpcode()) {
20037 case ISD::ADD:
20038 case ISD::SUB:
20039 case ISD::MUL:
20040 case ISD::SHL:
20041 if (Op.getNode()->getFlags().hasNoSignedWrap())
20042 break;
20043 LLVM_FALLTHROUGH;
20044 default:
20045 NeedOF = true;
20046 break;
20047 }
20048 break;
20049 }
20050 }
20051 // See if we can use the EFLAGS value from the operand instead of
20052 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
20053 // we prove that the arithmetic won't overflow, we can't use OF or CF.
20054 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
20055 // Emit a CMP with 0, which is the TEST pattern.
20056 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20057 DAG.getConstant(0, dl, Op.getValueType()));
20058 }
20059 unsigned Opcode = 0;
20060 unsigned NumOperands = 0;
20062 SDValue ArithOp = Op;
20064 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
20065 // which may be the result of a CAST. We use the variable 'Op', which is the
20066 // non-casted variable when we check for possible users.
20067 switch (ArithOp.getOpcode()) {
20068 case ISD::AND:
20069 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
20070 // because a TEST instruction will be better.
20071 if (!hasNonFlagsUse(Op))
20072 break;
20074 LLVM_FALLTHROUGH;
20075 case ISD::ADD:
20076 case ISD::SUB:
20077 case ISD::OR:
20078 case ISD::XOR:
20079 // Transform to an x86-specific ALU node with flags if there is a chance of
20080 // using an RMW op or only the flags are used. Otherwise, leave
20081 // the node alone and emit a 'test' instruction.
20082 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
20083 UE = Op.getNode()->use_end(); UI != UE; ++UI)
20084 if (UI->getOpcode() != ISD::CopyToReg &&
20085 UI->getOpcode() != ISD::SETCC &&
20086 UI->getOpcode() != ISD::STORE)
20087 goto default_case;
20089 // Otherwise use a regular EFLAGS-setting instruction.
20090 switch (ArithOp.getOpcode()) {
20091 default: llvm_unreachable("unexpected operator!");
20092 case ISD::ADD: Opcode = X86ISD::ADD; break;
20093 case ISD::SUB: Opcode = X86ISD::SUB; break;
20094 case ISD::XOR: Opcode = X86ISD::XOR; break;
20095 case ISD::AND: Opcode = X86ISD::AND; break;
20096 case ISD::OR: Opcode = X86ISD::OR; break;
20097 }
20099 NumOperands = 2;
20100 break;
20101 case X86ISD::ADD:
20102 case X86ISD::SUB:
20103 case X86ISD::OR:
20104 case X86ISD::XOR:
20105 case X86ISD::AND:
20106 return SDValue(Op.getNode(), 1);
20107 case ISD::SSUBO:
20108 case ISD::USUBO: {
20109 // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
20110 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20111 return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
20112 Op->getOperand(1)).getValue(1);
20113 }
20114 default:
20115 break;
20116 }
20118 if (Opcode == 0) {
20120 // Emit a CMP with 0, which is the TEST pattern.
20121 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20122 DAG.getConstant(0, dl, Op.getValueType()));
20123 }
20124 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20125 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
20127 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
20128 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
20129 return SDValue(New.getNode(), 1);
20130 }
20132 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
20133 /// equivalent.
20134 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
20135 const SDLoc &dl, SelectionDAG &DAG) const {
20136 if (isNullConstant(Op1))
20137 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
20139 EVT CmpVT = Op0.getValueType();
20141 if (CmpVT.isFloatingPoint())
20142 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
20144 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
20145 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
20147 // Only promote the compare up to I32 if it is a 16 bit operation
20148 // with an immediate. 16 bit immediates are to be avoided.
20149 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
20150 !DAG.getMachineFunction().getFunction().hasMinSize()) {
20151 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
20152 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
20153 // Don't do this if the immediate can fit in 8-bits.
20154 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
20155 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
20156 unsigned ExtendOp =
20157 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
20158 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
20159 // For equality comparisons try to use SIGN_EXTEND if the input was
20160 // truncate from something with enough sign bits.
20161 if (Op0.getOpcode() == ISD::TRUNCATE) {
20162 SDValue In = Op0.getOperand(0);
20163 unsigned EffBits =
20164 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20165 if (EffBits <= 16)
20166 ExtendOp = ISD::SIGN_EXTEND;
20167 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
20168 SDValue In = Op1.getOperand(0);
20169 unsigned EffBits =
20170 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20171 if (EffBits <= 16)
20172 ExtendOp = ISD::SIGN_EXTEND;
20173 }
20174 }
20177 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
20178 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
20181 // Use SUB instead of CMP to enable CSE between SUB and CMP.
20182 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
20183 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
20184 return Sub.getValue(1);
20185 }
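// Illustrative example (assuming a non-Atom, non-minsize target): a 16-bit
// compare against a large immediate such as "cmpw $1000, %ax" is widened
// above to a 32-bit compare, roughly
//   movzwl %ax, %eax   ; or movswl for the signed/equality cases
//   cmpl   $1000, %eax
// because the 16-bit immediate form needs an operand-size prefix and decodes
// slowly; immediates that fit in 8 bits are left alone.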
20187 /// Convert a comparison if required by the subtarget.
20188 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
20189 SelectionDAG &DAG) const {
20190 // If the subtarget does not support the FUCOMI instruction, floating-point
20191 // comparisons have to be converted.
20192 if (Subtarget.hasCMov() ||
20193 Cmp.getOpcode() != X86ISD::CMP ||
20194 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
20195 !Cmp.getOperand(1).getValueType().isFloatingPoint())
20196 return Cmp;
20198 // The instruction selector will select an FUCOM instruction instead of
20199 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
20200 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
20201 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
20202 SDLoc dl(Cmp);
20203 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
20204 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
20205 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
20206 DAG.getConstant(8, dl, MVT::i8));
20207 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
20209 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
20210 assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
20211 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
20212 }
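// For reference (an illustrative sketch, assuming an x87-only target
// without CMOV/FCOMI): the selected machine code is roughly
//   fucom             ; compare, result lands in FPSW
//   fnstsw %ax        ; copy FPSW into AX
//   sahf              ; transfer AH into EFLAGS
// after which the ordinary EFLAGS-based setcc/branches apply.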
20214 /// Check if replacement of SQRT with RSQRT should be disabled.
20215 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
20216 EVT VT = Op.getValueType();
20218 // We never want to use both SQRT and RSQRT instructions for the same input.
20219 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
20220 return false;
20222 if (VT.isVector())
20223 return Subtarget.hasFastVectorFSQRT();
20224 return Subtarget.hasFastScalarFSQRT();
20225 }
20227 /// The minimum architected relative accuracy is 2^-12. We need one
20228 /// Newton-Raphson step to have a good float result (24 bits of precision).
20229 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
20230 SelectionDAG &DAG, int Enabled,
20231 int &RefinementSteps,
20232 bool &UseOneConstNR,
20233 bool Reciprocal) const {
20234 EVT VT = Op.getValueType();
20236 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
20237 // It is likely not profitable to do this for f64 because a double-precision
20238 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
20239 // instructions: convert to single, rsqrtss, convert back to double, refine
20240 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
20241 // along with FMA, this could be a throughput win.
20242 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
20243 // after legalize types.
20244 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20245 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
20246 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
20247 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20248 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20249 if (RefinementSteps == ReciprocalEstimate::Unspecified)
20250 RefinementSteps = 1;
20252 UseOneConstNR = false;
20253 // There is no FRSQRT for 512-bits, but there is RSQRT14.
20254 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
20255 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20256 }
20257 return SDValue();
20258 }
20260 /// The minimum architected relative accuracy is 2^-12. We need one
20261 /// Newton-Raphson step to have a good float result (24 bits of precision).
20262 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
20263 int Enabled,
20264 int &RefinementSteps) const {
20265 EVT VT = Op.getValueType();
20267 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
20268 // It is likely not profitable to do this for f64 because a double-precision
20269 // reciprocal estimate with refinement on x86 prior to FMA requires
20270 // 15 instructions: convert to single, rcpss, convert back to double, refine
20271 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
20272 // along with FMA, this could be a throughput win.
20274 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20275 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
20276 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20277 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20278 // Enable estimate codegen with 1 refinement step for vector division.
20279 // Scalar division estimates are disabled because they break too much
20280 // real-world code. These defaults are intended to match GCC behavior.
20281 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
20282 return SDValue();
20284 if (RefinementSteps == ReciprocalEstimate::Unspecified)
20285 RefinementSteps = 1;
20287 // There is no FRCP for 512-bits, but there is RCP14.
20288 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
20289 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20290 }
20291 return SDValue();
20292 }
20294 /// If we have at least two divisions that use the same divisor, convert to
20295 /// multiplication by a reciprocal. This may need to be adjusted for a given
20296 /// CPU if a division's cost is not at least twice the cost of a multiplication.
20297 /// This is because we still need one division to calculate the reciprocal and
20298 /// then we need two multiplies by that reciprocal as replacements for the
20299 /// original divisions.
20300 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
20301 return 2;
20302 }
20304 SDValue
20305 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
20306 SelectionDAG &DAG,
20307 SmallVectorImpl<SDNode *> &Created) const {
20308 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
20309 if (isIntDivCheap(N->getValueType(0), Attr))
20310 return SDValue(N,0); // Lower SDIV as SDIV
20312 assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
20313 "Unexpected divisor!");
20315 // Only perform this transform if CMOV is supported otherwise the select
20316 // below will become a branch.
20317 if (!Subtarget.hasCMov())
20318 return SDValue();
20320 // fold (sdiv X, pow2)
20321 EVT VT = N->getValueType(0);
20322 // FIXME: Support i8.
20323 if (VT != MVT::i16 && VT != MVT::i32 &&
20324 !(Subtarget.is64Bit() && VT == MVT::i64))
20325 return SDValue();
20327 unsigned Lg2 = Divisor.countTrailingZeros();
20329 // If the divisor is 2 or -2, the default expansion is better.
20330 if (Lg2 == 1)
20331 return SDValue();
20333 SDLoc DL(N);
20334 SDValue N0 = N->getOperand(0);
20335 SDValue Zero = DAG.getConstant(0, DL, VT);
20336 APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
20337 SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
20339 // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
20340 SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
20341 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
20342 SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
20344 Created.push_back(Cmp.getNode());
20345 Created.push_back(Add.getNode());
20346 Created.push_back(CMov.getNode());
20348 // Divide by pow2.
20349 SDValue SRA =
20350 DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i64));
20352 // If we're dividing by a positive value, we're done. Otherwise, we must
20353 // negate the result.
20354 if (Divisor.isNonNegative())
20355 return SRA;
20357 Created.push_back(SRA.getNode());
20358 return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
20359 }
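// Worked example (illustrative only): for "sdiv i32 %x, 8", Lg2 == 3 and the
// sequence built above is
//   %cmp  = setlt %x, 0
//   %add  = add %x, 7          ; Pow2 - 1
//   %cmov = select %cmp, %add, %x
//   %res  = sra %cmov, 3
// which rounds toward zero for negative dividends; for a divisor of -8 the
// final result is additionally negated with "sub 0, %res".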
20361 /// Result of 'and' is compared against zero. Change to a BT node if possible.
20362 /// Returns the BT node and the condition code needed to use it.
20363 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
20364 const SDLoc &dl, SelectionDAG &DAG,
20365 SDValue &X86CC) {
20366 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
20367 SDValue Op0 = And.getOperand(0);
20368 SDValue Op1 = And.getOperand(1);
20369 if (Op0.getOpcode() == ISD::TRUNCATE)
20370 Op0 = Op0.getOperand(0);
20371 if (Op1.getOpcode() == ISD::TRUNCATE)
20372 Op1 = Op1.getOperand(0);
20374 SDValue Src, BitNo;
20375 if (Op1.getOpcode() == ISD::SHL)
20376 std::swap(Op0, Op1);
20377 if (Op0.getOpcode() == ISD::SHL) {
20378 if (isOneConstant(Op0.getOperand(0))) {
20379 // If we looked past a truncate, check that it's only truncating away
20380 // known zeros.
20381 unsigned BitWidth = Op0.getValueSizeInBits();
20382 unsigned AndBitWidth = And.getValueSizeInBits();
20383 if (BitWidth > AndBitWidth) {
20384 KnownBits Known = DAG.computeKnownBits(Op0);
20385 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
20386 return SDValue();
20387 }
20388 Src = Op1;
20389 BitNo = Op0.getOperand(1);
20390 }
20391 } else if (Op1.getOpcode() == ISD::Constant) {
20392 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
20393 uint64_t AndRHSVal = AndRHS->getZExtValue();
20394 SDValue AndLHS = Op0;
20396 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
20397 Src = AndLHS.getOperand(0);
20398 BitNo = AndLHS.getOperand(1);
20399 } else {
20400 // Use BT if the immediate can't be encoded in a TEST instruction or we
20401 // are optimizing for size and the immediate won't fit in a byte.
20402 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
20403 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
20404 isPowerOf2_64(AndRHSVal)) {
20405 Src = AndLHS;
20406 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
20407 Src.getValueType());
20408 }
20409 }
20410 }
20412 // No patterns found, give up.
20413 if (!Src.getNode())
20414 return SDValue();
20416 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
20417 // instruction. Since the shift amount is in-range-or-undefined, we know
20418 // that doing a bittest on the i32 value is ok. We extend to i32 because
20419 // the encoding for the i16 version is larger than the i32 version.
20420 // Also promote i16 to i32 for performance / code size reason.
20421 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
20422 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
20424 // See if we can use the 32-bit instruction instead of the 64-bit one for a
20425 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
20426 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
20427 // known to be zero.
20428 if (Src.getValueType() == MVT::i64 &&
20429 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
20430 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
20432 // If the operand types disagree, extend the shift amount to match. Since
20433 // BT ignores high bits (like shifts) we can use anyextend.
20434 if (Src.getValueType() != BitNo.getValueType())
20435 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
20437 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
20438 dl, MVT::i8);
20439 return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
20440 }
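// Illustrative example (not in the original file): for
//   (and %x, (shl 1, %n)) setne 0
// the code above produces "bt %n, %x" and reads CF via COND_B (COND_AE for
// the seteq form), replacing a variable shift, an AND, and a TEST.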
20442 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
20443 /// CMPs.
20444 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
20445 SDValue &Op1) {
20446 unsigned SSECC;
20447 bool Swap = false;
20449 // SSE Condition code mapping:
20450 //  0 - EQ
20451 //  1 - LT
20452 //  2 - LE
20453 //  3 - UNORD
20454 //  4 - NEQ
20455 //  5 - NLT
20456 //  6 - NLE
20457 //  7 - ORD
20458 switch (SetCCOpcode) {
20459 default: llvm_unreachable("Unexpected SETCC condition");
20460 case ISD::SETOEQ:
20461 case ISD::SETEQ: SSECC = 0; break;
20462 case ISD::SETOGT:
20463 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
20464 case ISD::SETLT:
20465 case ISD::SETOLT: SSECC = 1; break;
20466 case ISD::SETOGE:
20467 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
20468 case ISD::SETLE:
20469 case ISD::SETOLE: SSECC = 2; break;
20470 case ISD::SETUO: SSECC = 3; break;
20471 case ISD::SETUNE:
20472 case ISD::SETNE: SSECC = 4; break;
20473 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
20474 case ISD::SETUGE: SSECC = 5; break;
20475 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
20476 case ISD::SETUGT: SSECC = 6; break;
20477 case ISD::SETO: SSECC = 7; break;
20478 case ISD::SETUEQ: SSECC = 8; break;
20479 case ISD::SETONE: SSECC = 12; break;
20480 }
20481 if (Swap)
20482 std::swap(Op0, Op1);
20484 return SSECC;
20485 }
20487 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
20488 /// concatenate the result back.
20489 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
20490 MVT VT = Op.getSimpleValueType();
20492 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
20493 "Unsupported value type for operation");
20495 unsigned NumElems = VT.getVectorNumElements();
20496 SDLoc dl(Op);
20497 SDValue CC = Op.getOperand(2);
20499 // Extract the LHS vectors
20500 SDValue LHS = Op.getOperand(0);
20501 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
20502 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
20504 // Extract the RHS vectors
20505 SDValue RHS = Op.getOperand(1);
20506 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
20507 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
20509 // Issue the operation on the smaller types and concatenate the result back
20510 MVT EltVT = VT.getVectorElementType();
20511 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
20512 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
20513 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
20514 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
20515 }
20517 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
20519 SDValue Op0 = Op.getOperand(0);
20520 SDValue Op1 = Op.getOperand(1);
20521 SDValue CC = Op.getOperand(2);
20522 MVT VT = Op.getSimpleValueType();
20523 SDLoc dl(Op);
20525 assert(VT.getVectorElementType() == MVT::i1 &&
20526 "Cannot set masked compare for this operation");
20528 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
20530 // Prefer SETGT over SETLT.
20531 if (SetCCOpcode == ISD::SETLT) {
20532 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
20533 std::swap(Op0, Op1);
20534 }
20536 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
20537 }
20539 /// Given a buildvector constant, return a new vector constant with each element
20540 /// incremented or decremented. If incrementing or decrementing would result in
20541 /// unsigned overflow or underflow or this is not a simple vector constant,
20542 /// return an empty value.
20543 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
20544 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
20545 if (!BV)
20546 return SDValue();
20548 MVT VT = V.getSimpleValueType();
20549 MVT EltVT = VT.getVectorElementType();
20550 unsigned NumElts = VT.getVectorNumElements();
20551 SmallVector<SDValue, 8> NewVecC;
20552 SDLoc DL(V);
20553 for (unsigned i = 0; i < NumElts; ++i) {
20554 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
20555 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
20556 return SDValue();
20558 // Avoid overflow/underflow.
20559 const APInt &EltC = Elt->getAPIntValue();
20560 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
20561 return SDValue();
20563 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
20564 }
20566 return DAG.getBuildVector(VT, DL, NewVecC);
20567 }
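// Illustrative example (not part of the original): incrementing the v4i8
// constant <0, 1, 2, 3> yields <1, 2, 3, 4>, while <0, 1, 2, 255> returns an
// empty SDValue because lane 3 would wrap around.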
20569 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
20570 /// Op0 u<= Op1:
20571 /// t = psubus Op0, Op1
20572 /// pcmpeq t, <0..0>
20573 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
20574 ISD::CondCode Cond, const SDLoc &dl,
20575 const X86Subtarget &Subtarget,
20576 SelectionDAG &DAG) {
20577 if (!Subtarget.hasSSE2())
20578 return SDValue();
20580 MVT VET = VT.getVectorElementType();
20581 if (VET != MVT::i8 && VET != MVT::i16)
20582 return SDValue();
20584 switch (Cond) {
20585 default:
20586 return SDValue();
20587 case ISD::SETULT: {
20588 // If the comparison is against a constant we can turn this into a
20589 // setule. With psubus, setule does not require a swap. This is
20590 // beneficial because the constant in the register is no longer
20591 // clobbered as the destination, so it can be hoisted out of a loop.
20592 // Only do this pre-AVX since vpcmp* is no longer destructive.
20593 if (Subtarget.hasAVX())
20594 return SDValue();
20595 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
20596 if (!ULEOp1)
20597 return SDValue();
20598 Op1 = ULEOp1;
20599 break;
20600 }
20601 case ISD::SETUGT: {
20602 // If the comparison is against a constant, we can turn this into a setuge.
20603 // This is beneficial because materializing a constant 0 for the PCMPEQ is
20604 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
20605 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
20606 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
20607 if (!UGEOp1)
20608 return SDValue();
20609 Op1 = Op0;
20610 Op0 = UGEOp1;
20611 break;
20612 }
20613 // Psubus is better than flip-sign because it requires no inversion.
20614 case ISD::SETUGE:
20615 std::swap(Op0, Op1);
20616 break;
20617 case ISD::SETULE:
20618 break;
20619 }
20621 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
20622 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
20623 DAG.getConstant(0, dl, VT));
20624 }
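// Worked example (illustrative only): for v16i8 "%x setule %y",
//   %t = usubsat %x, %y      ; PSUBUSB: zero exactly where %x u<= %y
//   %r = pcmpeqb %t, <0..0>
// gives all-ones lanes precisely where the unsigned comparison holds, with
// no sign-bit flipping and no final inversion.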
20626 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
20627 SelectionDAG &DAG) {
20628 SDValue Op0 = Op.getOperand(0);
20629 SDValue Op1 = Op.getOperand(1);
20630 SDValue CC = Op.getOperand(2);
20631 MVT VT = Op.getSimpleValueType();
20632 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
20633 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
20634 SDLoc dl(Op);
20636 if (isFP) {
20637 #ifndef NDEBUG
20638 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
20639 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
20640 #endif
20642 unsigned Opc;
20643 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
20644 assert(VT.getVectorNumElements() <= 16);
20645 Opc = X86ISD::CMPM;
20646 } else {
20647 Opc = X86ISD::CMPP;
20648 // The SSE/AVX packed FP comparison nodes are defined with a
20649 // floating-point vector result that matches the operand type. This allows
20650 // them to work with an SSE1 target (integer vector types are not legal).
20651 VT = Op0.getSimpleValueType();
20652 }
20654 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
20655 // emit two comparisons and a logic op to tie them together.
20656 SDValue Cmp;
20657 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
20658 if (SSECC >= 8 && !Subtarget.hasAVX()) {
20659 // LLVM predicate is SETUEQ or SETONE.
20660 unsigned CC0, CC1;
20661 unsigned CombineOpc;
20662 if (Cond == ISD::SETUEQ) {
20663 CC0 = 3; // UNORD
20664 CC1 = 0; // EQ
20665 CombineOpc = X86ISD::FOR;
20666 } else {
20667 assert(Cond == ISD::SETONE);
20668 CC0 = 7; // ORD
20669 CC1 = 4; // NEQ
20670 CombineOpc = X86ISD::FAND;
20671 }
20673 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20674 DAG.getTargetConstant(CC0, dl, MVT::i8));
20675 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20676 DAG.getTargetConstant(CC1, dl, MVT::i8));
20677 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
20678 } else {
20679 // Handle all other FP comparisons here.
20680 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
20681 DAG.getTargetConstant(SSECC, dl, MVT::i8));
20682 }
20684 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
20685 // result type of SETCC. The bitcast is expected to be optimized away
20686 // during combining/isel.
20687 if (Opc == X86ISD::CMPP)
20688 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
20690 return Cmp;
20691 }
20693 MVT VTOp0 = Op0.getSimpleValueType();
20695 assert(VTOp0 == Op1.getSimpleValueType() &&
20696 "Expected operands with same type!");
20697 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
20698 "Invalid number of packed elements for source and destination!");
20700 // The non-AVX512 code below works under the assumption that source and
20701 // destination types are the same.
20702 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
20703 "Value types for source and destination must be the same!");
20705 // The result is boolean, but operands are int/float
20706 if (VT.getVectorElementType() == MVT::i1) {
20707 // In the AVX-512 architecture setcc returns a mask with i1 elements,
20708 // but there is no compare instruction for i8 and i16 elements in KNL.
20709 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
20710 "Unexpected operand type");
20711 return LowerIntVSETCC_AVX512(Op, DAG);
20712 }
20714 // Lower using XOP integer comparisons.
20715 if (VT.is128BitVector() && Subtarget.hasXOP()) {
20716 // Translate compare code to XOP PCOM compare mode.
20717 unsigned CmpMode = 0;
20718 switch (Cond) {
20719 default: llvm_unreachable("Unexpected SETCC condition");
20720 case ISD::SETULT:
20721 case ISD::SETLT: CmpMode = 0x00; break;
20722 case ISD::SETULE:
20723 case ISD::SETLE: CmpMode = 0x01; break;
20724 case ISD::SETUGT:
20725 case ISD::SETGT: CmpMode = 0x02; break;
20726 case ISD::SETUGE:
20727 case ISD::SETGE: CmpMode = 0x03; break;
20728 case ISD::SETEQ: CmpMode = 0x04; break;
20729 case ISD::SETNE: CmpMode = 0x05; break;
20730 }
20732 // Are we comparing unsigned or signed integers?
20733 unsigned Opc =
20734 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
20736 return DAG.getNode(Opc, dl, VT, Op0, Op1,
20737 DAG.getTargetConstant(CmpMode, dl, MVT::i8));
20738 }
20740 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
20741 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
20742 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
20743 SDValue BC0 = peekThroughBitcasts(Op0);
20744 if (BC0.getOpcode() == ISD::AND) {
20745 APInt UndefElts;
20746 SmallVector<APInt, 64> EltBits;
20747 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
20748 VT.getScalarSizeInBits(), UndefElts,
20749 EltBits, false, false)) {
20750 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
20751 Cond = ISD::SETEQ;
20752 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
20753 }
20754 }
20755 }
20756 }
20758 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
20759 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
20760 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
20761 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
20762 if (C1 && C1->getAPIntValue().isPowerOf2()) {
20763 unsigned BitWidth = VT.getScalarSizeInBits();
20764 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
20766 SDValue Result = Op0.getOperand(0);
20767 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
20768 DAG.getConstant(ShiftAmt, dl, VT));
20769 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
20770 DAG.getConstant(BitWidth - 1, dl, VT));
20771 return Result;
20772 }
20773 }
20775 // Break 256-bit integer vector compare into smaller ones.
20776 if (VT.is256BitVector() && !Subtarget.hasInt256())
20777 return Lower256IntVSETCC(Op, DAG);
20779 // If this is a SETNE against the signed minimum value, change it to SETGT.
20780 // If this is a SETNE against the signed maximum value, change it to SETLT,
20781 // which will be swapped to SETGT.
20782 // Otherwise we use PCMPEQ+invert.
20783 APInt ConstValue;
20784 if (Cond == ISD::SETNE &&
20785 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
20786 if (ConstValue.isMinSignedValue())
20787 Cond = ISD::SETGT;
20788 else if (ConstValue.isMaxSignedValue())
20789 Cond = ISD::SETLT;
20790 }
20792 // If both operands are known non-negative, then an unsigned compare is the
20793 // same as a signed compare and there's no need to flip signbits.
20794 // TODO: We could check for more general simplifications here since we're
20795 // computing known bits.
20796 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
20797 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
20799 // Special case: Use min/max operations for unsigned compares.
20800 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20801 if (ISD::isUnsignedIntSetCC(Cond) &&
20802 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
20803 TLI.isOperationLegal(ISD::UMIN, VT)) {
20804 // If we have a constant operand, increment/decrement it and change the
20805 // condition to avoid an invert.
20806 if (Cond == ISD::SETUGT) {
20807 // X > C --> X >= (C+1) --> X == umax(X, C+1)
20808 if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
20809 Op1 = UGTOp1;
20810 Cond = ISD::SETUGE;
20811 }
20812 }
20813 if (Cond == ISD::SETULT) {
20814 // X < C --> X <= (C-1) --> X == umin(X, C-1)
20815 if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
20816 Op1 = ULTOp1;
20817 Cond = ISD::SETULE;
20818 }
20819 }
20820 bool Invert = false;
20821 unsigned Opc;
20822 switch (Cond) {
20823 default: llvm_unreachable("Unexpected condition code");
20824 case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
20825 case ISD::SETULE: Opc = ISD::UMIN; break;
20826 case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
20827 case ISD::SETUGE: Opc = ISD::UMAX; break;
20828 }
20830 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20831 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
20833 // If the logical-not of the result is required, perform that now.
20834 if (Invert)
20835 Result = DAG.getNOT(dl, Result, VT);
20837 return Result;
20838 }
20840 // Try to use SUBUS and PCMPEQ.
20841 if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
20842 return V;
20844 // We are handling one of the integer comparisons here. Since SSE only has
20845 // GT and EQ comparisons for integer, swapping operands and multiple
20846 // operations may be required for some comparisons.
20847 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
20848 : X86ISD::PCMPGT;
20849 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
20850 Cond == ISD::SETGE || Cond == ISD::SETUGE;
20851 bool Invert = Cond == ISD::SETNE ||
20852 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
20854 if (Swap)
20855 std::swap(Op0, Op1);
20857 // Check that the operation in question is available (most are plain SSE2,
20858 // but PCMPGTQ and PCMPEQQ have different requirements).
20859 if (VT == MVT::v2i64) {
20860 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
20861 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
20863 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20864 // bits of the inputs before performing those operations. The lower
20865 // compare is always unsigned.
20866 SDValue SB;
20867 if (FlipSigns) {
20868 SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
20869 } else {
20870 SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
20871 }
20872 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
20873 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
20875 // Cast everything to the right type.
20876 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20877 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20879 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
20880 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
20881 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
20883 // Create masks for only the low parts/high parts of the 64 bit integers.
20884 static const int MaskHi[] = { 1, 1, 3, 3 };
20885 static const int MaskLo[] = { 0, 0, 2, 2 };
20886 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
20887 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
20888 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
20890 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
20891 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
20893 if (Invert)
20894 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20896 return DAG.getBitcast(VT, Result);
20897 }
20899 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
20900 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
20901 // pcmpeqd + pshufd + pand.
20902 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
20904 // First cast everything to the right type.
20905 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20906 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20908 // Do the compare.
20909 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
20911 // Make sure the lower and upper halves are both all-ones.
20912 static const int Mask[] = { 1, 0, 3, 2 };
20913 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
20914 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
20916 if (Invert)
20917 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20919 return DAG.getBitcast(VT, Result);
20920 }
20921 }
20923 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20924 // bits of the inputs before performing those operations.
20925 if (FlipSigns) {
20926 MVT EltVT = VT.getVectorElementType();
20927 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
20928 VT);
20929 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
20930 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
20931 }
20933 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20935 // If the logical-not of the result is required, perform that now.
20936 if (Invert)
20937 Result = DAG.getNOT(dl, Result, VT);
20939 return Result;
20940 }
20942 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
20943 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
20944 const SDLoc &dl, SelectionDAG &DAG,
20945 const X86Subtarget &Subtarget,
20946 SDValue &X86CC) {
20947 // Only support equality comparisons.
20948 if (CC != ISD::SETEQ && CC != ISD::SETNE)
20949 return SDValue();
20951 // Must be a bitcast from vXi1.
20952 if (Op0.getOpcode() != ISD::BITCAST)
20953 return SDValue();
20955 Op0 = Op0.getOperand(0);
20956 MVT VT = Op0.getSimpleValueType();
20957 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
20958 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
20959 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
20960 return SDValue();
20962 X86::CondCode X86Cond;
20963 if (isNullConstant(Op1)) {
20964 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
20965 } else if (isAllOnesConstant(Op1)) {
20966 // C flag is set for all ones.
20967 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
20968 } else
20969 return SDValue();
20971 // If the input is an AND, we can combine its operands into the KTEST.
20972 bool KTestable = false;
20973 if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
20974 KTestable = true;
20975 if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
20976 KTestable = true;
20977 if (!isNullConstant(Op1))
20978 KTestable = false;
20979 if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
20980 SDValue LHS = Op0.getOperand(0);
20981 SDValue RHS = Op0.getOperand(1);
20982 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
20983 return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
20984 }
20986 // If the input is an OR, we can combine its operands into the KORTEST.
20987 SDValue LHS = Op0;
20988 SDValue RHS = Op0;
20989 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
20990 LHS = Op0.getOperand(0);
20991 RHS = Op0.getOperand(1);
20992 }
20994 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
20995 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
20996 }
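// Illustrative example (assuming AVX512 with a v16i1 mask %k): comparing
// (bitcast %k to i16) against 0 for equality is emitted as
//   kortestw %k, %k
// and ZF is read with COND_E; comparing against all-ones reads CF instead,
// since KORTEST sets CF when the OR of the operands is all ones.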
20998 /// Emit flags for the given setcc condition and operands. Also returns the
20999 /// corresponding X86 condition code constant in X86CC.
21000 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
21001 ISD::CondCode CC, const SDLoc &dl,
21002 SelectionDAG &DAG,
21003 SDValue &X86CC) const {
21004 // Optimize to BT if possible.
21005 // Lower (X & (1 << N)) == 0 to BT(X, N).
21006 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
21007 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
21008 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
21009 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21010 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
21011 return BT;
21012 }
21014 // Try to use PTEST for a tree of ORs equality compared with 0.
21015 // TODO: We could do AND tree with all 1s as well by using the C flag.
21016 if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
21017 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21018 if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
21019 return PTEST;
21020 }
21022 // Try to lower using KORTEST or KTEST.
21023 if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
21024 return Test;
21026 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
21027 // these.
21028 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
21029 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21030 // If the input is a setcc, then reuse the input setcc or use a new one with
21031 // the inverted condition.
21032 if (Op0.getOpcode() == X86ISD::SETCC) {
21033 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
21034 if (!Invert)
21035 X86CC = Op0.getOperand(0);
21036 else {
21037 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
21038 CCode = X86::GetOppositeBranchCondition(CCode);
21039 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21040 }
21042 return Op0.getOperand(1);
21043 }
21044 }
21046 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
21047 X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
21048 if (CondCode == X86::COND_INVALID)
21049 return SDValue();
21051 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
21052 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
21053 X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
21054 return EFLAGS;
21055 }
21057 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
21059 MVT VT = Op.getSimpleValueType();
21061 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
21063 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
21064 SDValue Op0 = Op.getOperand(0);
21065 SDValue Op1 = Op.getOperand(1);
21066 SDLoc dl(Op);
21067 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
21069 // Handle f128 first, since one possible outcome is a normal integer
21070 // comparison which gets handled by emitFlagsForSetcc.
21071 if (Op0.getValueType() == MVT::f128) {
21072 softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1);
21074 // If softenSetCCOperands returned a scalar, use it.
21075 if (!Op1.getNode()) {
21076 assert(Op0.getValueType() == Op.getValueType() &&
21077 "Unexpected setcc expansion!");
21078 return Op0;
21079 }
21080 }
21082 SDValue X86CC;
21083 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
21084 if (!EFLAGS)
21085 return SDValue();
21087 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
21088 }
21090 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
21091 SDValue LHS = Op.getOperand(0);
21092 SDValue RHS = Op.getOperand(1);
21093 SDValue Carry = Op.getOperand(2);
21094 SDValue Cond = Op.getOperand(3);
21095 SDLoc DL(Op);
21097 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
21098 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
21100 // Recreate the carry if needed.
21101 EVT CarryVT = Carry.getValueType();
21102 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
21103 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
21104 Carry, DAG.getConstant(NegOne, DL, CarryVT));
21106 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
21107 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
21108 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
21109 }
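// Illustrative sketch (not from the original source): SETCCCARRY "a < b with
// carry-in %c" is rebuilt above as
//   %carry = X86ISD::ADD %c, -1       ; re-materialize CF from the 0/-1 carry
//   %sbb   = X86ISD::SBB %a, %b, %carry.flags
//   %res   = setcc on %sbb.flags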
21111 // This function returns three things: the arithmetic computation itself
21112 // (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
21113 // flag and the condition code define the case in which the arithmetic
21114 // computation overflows.
21115 static std::pair<SDValue, SDValue>
21116 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
21117 assert(Op.getResNo() == 0 && "Unexpected result number!");
21118 SDValue Value, Overflow;
21119 SDValue LHS = Op.getOperand(0);
21120 SDValue RHS = Op.getOperand(1);
21121 unsigned BaseOp = 0;
21122 SDLoc DL(Op);
21123 switch (Op.getOpcode()) {
21124 default: llvm_unreachable("Unknown ovf instruction!");
21125 case ISD::SADDO:
21126 BaseOp = X86ISD::ADD;
21127 Cond = X86::COND_O;
21128 break;
21129 case ISD::UADDO:
21130 BaseOp = X86ISD::ADD;
21131 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
21132 break;
21133 case ISD::SSUBO:
21134 BaseOp = X86ISD::SUB;
21135 Cond = X86::COND_O;
21136 break;
21137 case ISD::USUBO:
21138 BaseOp = X86ISD::SUB;
21139 Cond = X86::COND_B;
21140 break;
21141 case ISD::SMULO:
21142 BaseOp = X86ISD::SMUL;
21143 Cond = X86::COND_O;
21144 break;
21145 case ISD::UMULO:
21146 BaseOp = X86ISD::UMUL;
21147 Cond = X86::COND_O;
21148 break;
21149 }
21152 // Also sets EFLAGS.
21153 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21154 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
21155 Overflow = Value.getValue(1);
21158 return std::make_pair(Value, Overflow);
21159 }
21161 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
21162 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
21163 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
21164 // looks for this combo and may remove the "setcc" instruction if the "setcc"
21165 // has only one use.
21166 SDLoc DL(Op);
21167 X86::CondCode Cond;
21168 SDValue Value, Overflow;
21169 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
21171 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
21172 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
21173 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
21174 }
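// Illustrative example (not part of the original): "uaddo i32 %a, %b" becomes
//   %v   = X86ISD::ADD %a, %b         ; also produces EFLAGS
//   %ofl = X86ISD::SETCC COND_B, %v.flags
//   merge_values %v, %ofl
// so the overflow bit is just a SETB on the flags of the real addition.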
21176 /// Return true if opcode is a X86 logical comparison.
21177 static bool isX86LogicalCmp(SDValue Op) {
21178 unsigned Opc = Op.getOpcode();
21179 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
21180 Opc == X86ISD::SAHF)
21181 return true;
21182 if (Op.getResNo() == 1 &&
21183 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
21184 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
21185 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
21186 return true;
21188 return false;
21189 }
21191 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
21192 if (V.getOpcode() != ISD::TRUNCATE)
21193 return false;
21195 SDValue VOp0 = V.getOperand(0);
21196 unsigned InBits = VOp0.getValueSizeInBits();
21197 unsigned Bits = V.getValueSizeInBits();
21198 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
21199 }
21201 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
21202 bool AddTest = true;
21203 SDValue Cond = Op.getOperand(0);
21204 SDValue Op1 = Op.getOperand(1);
21205 SDValue Op2 = Op.getOperand(2);
21206 SDLoc DL(Op);
21207 MVT VT = Op1.getSimpleValueType();
21208 SDValue CC;
21210 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
21211 // are available or VBLENDV if AVX is available.
21212 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
21213 if (Cond.getOpcode() == ISD::SETCC &&
21214 ((Subtarget.hasSSE2() && VT == MVT::f64) ||
21215 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
21216 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
21217 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
21218 unsigned SSECC = translateX86FSETCC(
21219 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
21221 if (Subtarget.hasAVX512()) {
21222 SDValue Cmp =
21223 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
21224 DAG.getTargetConstant(SSECC, DL, MVT::i8));
21225 assert(!VT.isVector() && "Not a scalar type?");
21226 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
21227 }
21229 if (SSECC < 8 || Subtarget.hasAVX()) {
21230 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
21231 DAG.getTargetConstant(SSECC, DL, MVT::i8));
21233 // If we have AVX, we can use a variable vector select (VBLENDV) instead
21234 // of 3 logic instructions for size savings and potentially speed.
21235 // Unfortunately, there is no scalar form of VBLENDV.
21237 // If either operand is a +0.0 constant, don't try this. We can expect to
21238 // optimize away at least one of the logic instructions later in that
21239 // case, so that sequence would be faster than a variable blend.
21241 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
21242 // uses XMM0 as the selection register. That may need just as many
21243 // instructions as the AND/ANDN/OR sequence due to register moves, so
21244 // don't bother.
21245 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
21246 !isNullFPConstant(Op2)) {
21247 // Convert to vectors, do a VSELECT, and convert back to scalar.
21248 // All of the conversions should be optimized away.
21249 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
21250 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
21251 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
21252 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
21254 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
21255 VCmp = DAG.getBitcast(VCmpVT, VCmp);
21257 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
21259 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
21260 VSel, DAG.getIntPtrConstant(0, DL));
21261 }
21262 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
21263 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
21264 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
21265 }
21266 }
21268 // AVX512 fallback is to lower selects of scalar floats to masked moves.
21269 if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
21270 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
21271 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
21272 }
21274 // For v64i1 without 64-bit support we need to split and rejoin.
21275 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
21276 assert(Subtarget.hasBWI() && "Expected BWI to be legal");
21277 SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
21278 SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
21279 SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
21280 SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
21281 SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
21282 SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
21283 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
21284 }
21286 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
21287 SDValue Op1Scalar;
21288 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
21289 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
21290 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
21291 Op1Scalar = Op1.getOperand(0);
21292 SDValue Op2Scalar;
21293 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
21294 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
21295 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
21296 Op2Scalar = Op2.getOperand(0);
21297 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
21298 SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
21299 Op1Scalar, Op2Scalar);
21300 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
21301 return DAG.getBitcast(VT, newSelect);
21302 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
21303 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
21304 DAG.getIntPtrConstant(0, DL));
21305 }
21306 }
21308 if (Cond.getOpcode() == ISD::SETCC) {
21309 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
21310 Cond = NewCond;
21311 // If the condition was updated, it's possible that the operands of the
21312 // select were also updated (for example, EmitTest has a RAUW). Refresh
21313 // the local references to the select operands in case they got stale.
21314 Op1 = Op.getOperand(1);
21315 Op2 = Op.getOperand(2);
21316 }
21317 }
21319 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
21320 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
21321 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
21322 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
21323 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
21324 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
21325 if (Cond.getOpcode() == X86ISD::SETCC &&
21326 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
21327 isNullConstant(Cond.getOperand(1).getOperand(1))) {
21328 SDValue Cmp = Cond.getOperand(1);
21329 unsigned CondCode = Cond.getConstantOperandVal(0);
21331 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
21332 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
21333 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
21334 SDValue CmpOp0 = Cmp.getOperand(0);
21336 // Apply further optimizations for special cases
21337 // (select (x != 0), -1, 0) -> neg & sbb
21338 // (select (x == 0), 0, -1) -> neg & sbb
21339 if (isNullConstant(Y) &&
21340 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
21341 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
21342 SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
21343 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21344 Zero = DAG.getConstant(0, DL, Op.getValueType());
21345 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
21348 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
21349 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
21350 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
21352 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21353 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
21354 SDValue Res = // Res = 0 or -1.
21355 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
21357 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
21358 Res = DAG.getNOT(DL, Res, Res.getValueType());
21360 if (!isNullConstant(Op2))
21361 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
21362 return Res;
21363 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
21364 Cmp.getOperand(0).getOpcode() == ISD::AND &&
21365 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
21366 SDValue CmpOp0 = Cmp.getOperand(0);
21367 SDValue Src1, Src2;
21368 // True if Op2 is an XOR or OR operator and one of its operands
21369 // is equal to Op1:
21370 // ( a , a op b) || ( b , a op b)
21371 auto isOrXorPattern = [&]() {
21372 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
21373 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
21374 Src1 =
21375 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
21376 Src2 = Op1;
21377 return true;
21378 }
21379 return false;
21380 };
21382 if (isOrXorPattern()) {
21383 SDValue Neg;
21384 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
21385 // We need a mask of all zeros or all ones with the same size as the
21386 // other operands.
21387 if (CmpSz > VT.getSizeInBits())
21388 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
21389 else if (CmpSz < VT.getSizeInBits())
21390 Neg = DAG.getNode(ISD::AND, DL, VT,
21391 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
21392 DAG.getConstant(1, DL, VT));
21393 else
21394 Neg = CmpOp0;
21395 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
21396 Neg); // -(and (x, 0x1))
21397 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
21398 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
21399 }
21400 }
21401 }
21403 // Look past (and (setcc_carry (cmp ...)), 1).
21404 if (Cond.getOpcode() == ISD::AND &&
21405 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
21406 isOneConstant(Cond.getOperand(1)))
21407 Cond = Cond.getOperand(0);
21409 // If condition flag is set by a X86ISD::CMP, then use it as the condition
21410 // setting operand in place of the X86ISD::SETCC.
21411 unsigned CondOpcode = Cond.getOpcode();
21412 if (CondOpcode == X86ISD::SETCC ||
21413 CondOpcode == X86ISD::SETCC_CARRY) {
21414 CC = Cond.getOperand(0);
21416 SDValue Cmp = Cond.getOperand(1);
21417 bool IllegalFPCMov = false;
21418 if (VT.isFloatingPoint() && !VT.isVector() &&
21419 !isScalarFPTypeInSSEReg(VT)) // FPStack?
21420 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
21422 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
21423 Cmp.getOpcode() == X86ISD::BT) { // FIXME
21424 Cond = Cmp;
21425 AddTest = false;
21426 }
21427 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
21428 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
21429 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
21430 SDValue Value;
21431 X86::CondCode X86Cond;
21432 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
21434 CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
21435 AddTest = false;
21436 }
21438 if (AddTest) {
21439 // Look past the truncate if the high bits are known zero.
21440 if (isTruncWithZeroHighBitsInput(Cond, DAG))
21441 Cond = Cond.getOperand(0);
21443 // We know the result of AND is compared against zero. Try to match
21444 // it to BT.
21445 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
21446 SDValue BTCC;
21447 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
21448 CC = BTCC;
21449 Cond = BT;
21450 AddTest = false;
21451 }
21452 }
21453 }
21455 if (AddTest) {
21456 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
21457 Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
21458 X86::COND_NE, DL, DAG);
21459 }
21461 // a < b ? -1 : 0 -> RES = ~setcc_carry
21462 // a < b ? 0 : -1 -> RES = setcc_carry
21463 // a >= b ? -1 : 0 -> RES = setcc_carry
21464 // a >= b ? 0 : -1 -> RES = ~setcc_carry
21465 if (Cond.getOpcode() == X86ISD::SUB) {
21466 Cond = ConvertCmpIfNecessary(Cond, DAG);
21467 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
21469 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
21470 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
21471 (isNullConstant(Op1) || isNullConstant(Op2))) {
21472 SDValue Res =
21472 SDValue Res =
21473 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
21474 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
21475 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
21476 return DAG.getNOT(DL, Res, Res.getValueType());
21477 return Res;
21478 }
21479 }
21481 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
21482 // widen the cmov and push the truncate through. This avoids introducing a new
21483 // branch during isel and doesn't add any extensions.
21484 if (Op.getValueType() == MVT::i8 &&
21485 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
21486 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
21487 if (T1.getValueType() == T2.getValueType() &&
21488 // Blacklist CopyFromReg to avoid partial register stalls.
21489 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
21490 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
21491 CC, Cond);
21492 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
21496 // Or finally, promote i8 cmovs if we have CMOV,
21497 // or i16 cmovs if it won't prevent folding a load.
21498 // FIXME: we should not limit promotion of i8 case to only when the CMOV is
21499 // legal, but EmitLoweredSelect() cannot deal with these extensions
21500 // being inserted between two CMOVs (the i16 case suffers from this too).
21501 // https://bugs.llvm.org/show_bug.cgi?id=40974
21502 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
21503 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
21504 !MayFoldLoad(Op2))) {
21505 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
21506 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
21507 SDValue Ops[] = { Op2, Op1, CC, Cond };
21508 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
21509 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
21512 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
21513 // condition is true.
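// E.g. for (select cond, a, b) the ops below are {b, a, CC, EFLAGS}; the
// node keeps operand 1 (a) when CC holds and falls back to b otherwise.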
21514 SDValue Ops[] = { Op2, Op1, CC, Cond };
21515 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
21516 }
21518 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
21519 const X86Subtarget &Subtarget,
21520 SelectionDAG &DAG) {
21521 MVT VT = Op->getSimpleValueType(0);
21522 SDValue In = Op->getOperand(0);
21523 MVT InVT = In.getSimpleValueType();
21524 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
21525 MVT VTElt = VT.getVectorElementType();
21527 SDLoc dl(Op);
21528 unsigned NumElts = VT.getVectorNumElements();
21530 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
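// E.g. a sext of v8i1 to v8i16 without BWI goes via v8i32: the i1 lanes
// are selected to -1/0 as 32-bit elements (widened to v16i32 when VLX is
// also missing) and truncated back to v8i16 at the end.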
21531 MVT ExtVT = VT;
21532 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
21533 // If v16i32 is to be avoided, we'll need to split and concatenate.
21534 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
21535 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
21537 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
21540 // Widen to 512-bits if VLX is not supported.
21541 MVT WideVT = ExtVT;
21542 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
21543 NumElts *= 512 / ExtVT.getSizeInBits();
21544 InVT = MVT::getVectorVT(MVT::i1, NumElts);
21545 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
21546 In, DAG.getIntPtrConstant(0, dl));
21547 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
21550 SDValue V;
21551 MVT WideEltVT = WideVT.getVectorElementType();
21552 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
21553 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
21554 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
21555 } else {
21556 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
21557 SDValue Zero = DAG.getConstant(0, dl, WideVT);
21558 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
21559 }
21561 // Truncate if we had to extend i16/i8 above.
21562 if (VT != ExtVT) {
21563 WideVT = MVT::getVectorVT(VTElt, NumElts);
21564 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
21565 }
21567 // Extract back to 128/256-bit if we widened.
21568 if (VT.getSizeInBits() != WideVT.getSizeInBits())
21569 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
21570 DAG.getIntPtrConstant(0, dl));
21572 return V;
21573 }
21575 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
21576 SelectionDAG &DAG) {
21577 SDValue In = Op->getOperand(0);
21578 MVT InVT = In.getSimpleValueType();
21580 if (InVT.getVectorElementType() == MVT::i1)
21581 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
21583 assert(Subtarget.hasAVX() && "Expected AVX support");
21584 return LowerAVXExtend(Op, DAG, Subtarget);
21587 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
21588 // For sign extend this needs to handle all vector sizes and SSE4.1 and
21589 // non-SSE4.1 targets. For zero extend this should only handle inputs of
21590 // MVT::v64i8 when BWI is not supported, but AVX512 is.
21591 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
21592 const X86Subtarget &Subtarget,
21593 SelectionDAG &DAG) {
21594 SDValue In = Op->getOperand(0);
21595 MVT VT = Op->getSimpleValueType(0);
21596 MVT InVT = In.getSimpleValueType();
21598 MVT SVT = VT.getVectorElementType();
21599 MVT InSVT = InVT.getVectorElementType();
21600 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
21602 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
21603 return SDValue();
21604 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
21605 return SDValue();
21606 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
21607 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
21608 !(VT.is512BitVector() && Subtarget.hasAVX512()))
21609 return SDValue();
21611 SDLoc dl(Op);
21612 unsigned Opc = Op.getOpcode();
21613 unsigned NumElts = VT.getVectorNumElements();
21615 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
21616 // For 512-bit vectors, we need 128-bits or 256-bits.
21617 if (InVT.getSizeInBits() > 128) {
21618 // Input needs to be at least the same number of elements as output, and
21619 // at least 128-bits.
21620 int InSize = InSVT.getSizeInBits() * NumElts;
21621 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
21622 InVT = In.getSimpleValueType();
21625 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
21626 // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
21627 // instructions still need to be handled here for 256/512-bit results.
21628 if (Subtarget.hasInt256()) {
21629 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
21631 if (InVT.getVectorNumElements() != NumElts)
21632 return DAG.getNode(Op.getOpcode(), dl, VT, In);
21634 // FIXME: Apparently we create inreg operations that could be regular
21635 // extends.
21636 unsigned ExtOpc =
21637 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
21638 : ISD::ZERO_EXTEND;
21639 return DAG.getNode(ExtOpc, dl, VT, In);
21640 }
21642 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
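// E.g. (sext_invec v16i8 -> v16i16) on AVX1 becomes one v8i16 extension of
// the low 8 bytes, one of a shuffle that moves bytes 8-15 down, and a
// 256-bit concat of the two halves.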
21643 if (Subtarget.hasAVX()) {
21644 assert(VT.is256BitVector() && "256-bit vector expected");
21645 MVT HalfVT = VT.getHalfNumVectorElementsVT();
21646 int HalfNumElts = HalfVT.getVectorNumElements();
21648 unsigned NumSrcElts = InVT.getVectorNumElements();
21649 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
21650 for (int i = 0; i != HalfNumElts; ++i)
21651 HiMask[i] = HalfNumElts + i;
21653 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
21654 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
21655 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
21656 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
21659 // We should only get here for sign extend.
21660 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
21661 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
21663 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
21664 SDValue Curr = In;
21665 SDValue SignExt = Curr;
21667 // As SRAI is only available on i16/i32 types, we expand only up to i32
21668 // and handle i64 separately.
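// E.g. (sext_invec v16i8 -> v4i32): the shuffle below puts byte i into the
// top byte of 32-bit lane i and a VSRAI by 24 smears the sign bit across
// the lane; v2i64 instead interleaves with a SETGT-derived sign vector,
// since no 64-bit element SRAI exists before AVX512.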
21669 if (InVT != MVT::v4i32) {
21670 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
21672 unsigned DestWidth = DestVT.getScalarSizeInBits();
21673 unsigned Scale = DestWidth / InSVT.getSizeInBits();
21675 unsigned InNumElts = InVT.getVectorNumElements();
21676 unsigned DestElts = DestVT.getVectorNumElements();
21678 // Build a shuffle mask that takes each input element and places it in the
21679 // MSBs of the new element size.
21680 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
21681 for (unsigned i = 0; i != DestElts; ++i)
21682 Mask[i * Scale + (Scale - 1)] = i;
21684 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
21685 Curr = DAG.getBitcast(DestVT, Curr);
21687 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
21688 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
21689 DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
21692 if (VT == MVT::v2i64) {
21693 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
21694 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
21695 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
21696 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
21697 SignExt = DAG.getBitcast(VT, SignExt);
21698 }
21700 return SignExt;
21701 }
21703 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
21704 SelectionDAG &DAG) {
21705 MVT VT = Op->getSimpleValueType(0);
21706 SDValue In = Op->getOperand(0);
21707 MVT InVT = In.getSimpleValueType();
21708 SDLoc dl(Op);
21710 if (InVT.getVectorElementType() == MVT::i1)
21711 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
21713 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
21714 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
21715 "Expected same number of elements");
21716 assert((VT.getVectorElementType() == MVT::i16 ||
21717 VT.getVectorElementType() == MVT::i32 ||
21718 VT.getVectorElementType() == MVT::i64) &&
21719 "Unexpected element type");
21720 assert((InVT.getVectorElementType() == MVT::i8 ||
21721 InVT.getVectorElementType() == MVT::i16 ||
21722 InVT.getVectorElementType() == MVT::i32) &&
21723 "Unexpected element type");
21725 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
21726 if (InVT == MVT::v8i8) {
21727 if (VT != MVT::v8i64)
21728 return SDValue();
21730 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
21731 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
21732 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
21733 }
21735 if (Subtarget.hasInt256())
21736 return Op;
21738 // Optimize vectors in AVX mode
21739 // Sign extend v8i16 to v8i32 and
21740 // v4i32 to v2i64.
21742 // Divide input vector into two parts
21743 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}
21744 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
21745 // concat the vectors to original VT
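// E.g. v8i16 -> v8i32: OpLo extends elements 0-3 in place, the {4,5,6,7}
// half of the shuffle mask moves the high elements down for OpHi, and the
// two v4i32 results are concatenated back to v8i32.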
21746 MVT HalfVT = VT.getHalfNumVectorElementsVT();
21747 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
21749 unsigned NumElems = InVT.getVectorNumElements();
21750 SmallVector<int,8> ShufMask(NumElems, -1);
21751 for (unsigned i = 0; i != NumElems/2; ++i)
21752 ShufMask[i] = i + NumElems/2;
21754 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
21755 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
21757 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
21760 /// Change a vector store into a pair of half-size vector stores.
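/// E.g. a simple (non-volatile) 32-byte v8i32 store at address P becomes
/// two v4i32 stores at P and P+16 whose chains are joined by a TokenFactor.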
21761 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
21762 SDValue StoredVal = Store->getValue();
21763 assert((StoredVal.getValueType().is256BitVector() ||
21764 StoredVal.getValueType().is512BitVector()) &&
21765 "Expecting 256/512-bit op");
21767 // Splitting volatile memory ops is not allowed unless the operation was not
21768 // legal to begin with. We are assuming the input op is legal (this transform
21769 // is only used for targets with AVX).
21770 if (!Store->isSimple())
21771 return SDValue();
21773 MVT StoreVT = StoredVal.getSimpleValueType();
21774 unsigned NumElems = StoreVT.getVectorNumElements();
21775 unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
21776 unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
21778 SDLoc DL(Store);
21779 SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
21780 SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
21781 SDValue Ptr0 = Store->getBasePtr();
21782 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
21783 unsigned Alignment = Store->getAlignment();
21784 SDValue Ch0 =
21785 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
21786 Alignment, Store->getMemOperand()->getFlags());
21787 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
21788 Store->getPointerInfo().getWithOffset(HalfAlign),
21789 MinAlign(Alignment, HalfAlign),
21790 Store->getMemOperand()->getFlags());
21791 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
21792 }
21794 /// Scalarize a vector store, bitcasting to StoreVT to determine the scalar
21795 /// type.
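/// E.g. a v4i32 store scalarizes into four i32 element stores at offsets
/// 0, 4, 8 and 12 from the base pointer, each with MinAlign'd alignment.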
21796 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
21797 SelectionDAG &DAG) {
21798 SDValue StoredVal = Store->getValue();
21799 assert(StoreVT.is128BitVector() &&
21800 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
21801 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
21803 // Splitting volatile memory ops is not allowed unless the operation was not
21804 // legal to begin with. We are assuming the input op is legal (this transform
21805 // is only used for targets with AVX).
21806 if (!Store->isSimple())
21807 return SDValue();
21809 MVT StoreSVT = StoreVT.getScalarType();
21810 unsigned NumElems = StoreVT.getVectorNumElements();
21811 unsigned ScalarSize = StoreSVT.getStoreSize();
21812 unsigned Alignment = Store->getAlignment();
21814 SDLoc DL(Store);
21815 SmallVector<SDValue, 4> Stores;
21816 for (unsigned i = 0; i != NumElems; ++i) {
21817 unsigned Offset = i * ScalarSize;
21818 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
21819 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
21820 DAG.getIntPtrConstant(i, DL));
21821 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
21822 Store->getPointerInfo().getWithOffset(Offset),
21823 MinAlign(Alignment, Offset),
21824 Store->getMemOperand()->getFlags());
21825 Stores.push_back(Ch);
21827 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
21828 }
21830 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
21831 SelectionDAG &DAG) {
21832 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
21833 SDLoc dl(St);
21834 SDValue StoredVal = St->getValue();
21836 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
21837 if (StoredVal.getValueType().isVector() &&
21838 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
21839 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
21840 "Unexpected VT");
21841 assert(!St->isTruncatingStore() && "Expected non-truncating store");
21842 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21843 "Expected AVX512F without AVX512DQI");
21845 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
21846 DAG.getUNDEF(MVT::v16i1), StoredVal,
21847 DAG.getIntPtrConstant(0, dl));
21848 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
21849 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
21851 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21852 St->getPointerInfo(), St->getAlignment(),
21853 St->getMemOperand()->getFlags());
21856 if (St->isTruncatingStore())
21857 return SDValue();
21859 // If this is a 256-bit store of concatenated ops, we are better off splitting
21860 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
21861 // and each half can execute independently. Some cores would split the op into
21862 // halves anyway, so the concat (vinsertf128) is purely an extra op.
21863 MVT StoreVT = StoredVal.getSimpleValueType();
21864 if (StoreVT.is256BitVector()) {
21865 SmallVector<SDValue, 4> CatOps;
21866 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
21867 return splitVectorStore(St, DAG);
21868 return SDValue();
21869 }
21871 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21872 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
21873 "Unexpected VT");
21874 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
21875 TargetLowering::TypeWidenVector && "Unexpected type action!");
21877 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
21878 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
21879 DAG.getUNDEF(StoreVT));
21881 if (Subtarget.hasSSE2()) {
21882 // Widen the vector, cast to a v2x64 type, extract the single 64-bit
21883 // element and store it.
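// E.g. a v2i32 store: widen to v4i32, bitcast to v2i64 (v2f64 on 32-bit
// targets or for FP values), extract element 0 and store it as one 64-bit
// scalar.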
21884 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
21885 MVT CastVT = MVT::getVectorVT(StVT, 2);
21886 StoredVal = DAG.getBitcast(CastVT, StoredVal);
21887 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
21888 DAG.getIntPtrConstant(0, dl));
21890 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21891 St->getPointerInfo(), St->getAlignment(),
21892 St->getMemOperand()->getFlags());
21894 assert(Subtarget.hasSSE1() && "Expected SSE");
21895 SDVTList Tys = DAG.getVTList(MVT::Other);
21896 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
21897 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
21898 St->getMemOperand());
21899 }
21901 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
21902 // may emit an illegal shuffle but the expansion is still better than scalar
21903 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
21904 // we'll emit a shuffle and an arithmetic shift.
21905 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
21906 // TODO: It is possible to support ZExt by zeroing the undef values during
21907 // the shuffle phase or after the shuffle.
21908 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
21909 SelectionDAG &DAG) {
21910 MVT RegVT = Op.getSimpleValueType();
21911 assert(RegVT.isVector() && "We only custom lower vector loads.");
21912 assert(RegVT.isInteger() &&
21913 "We only custom lower integer vector loads.");
21915 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
21916 SDLoc dl(Ld);
21918 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
21919 if (RegVT.getVectorElementType() == MVT::i1) {
21920 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
21921 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
21922 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21923 "Expected AVX512F without AVX512DQI");
21925 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
21926 Ld->getPointerInfo(), Ld->getAlignment(),
21927 Ld->getMemOperand()->getFlags());
21929 // Replace chain users with the new chain.
21930 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
21932 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
21933 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
21934 DAG.getBitcast(MVT::v16i1, Val),
21935 DAG.getIntPtrConstant(0, dl));
21936 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
21937 }
21939 return SDValue();
21940 }
21942 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
21943 /// each of which has no other use apart from the AND / OR.
21944 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
21945 Opc = Op.getOpcode();
21946 if (Opc != ISD::OR && Opc != ISD::AND)
21947 return false;
21948 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21949 Op.getOperand(0).hasOneUse() &&
21950 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
21951 Op.getOperand(1).hasOneUse());
21954 /// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, where
21955 /// the SETCC node has a single use.
21956 static bool isXor1OfSetCC(SDValue Op) {
21957 if (Op.getOpcode() != ISD::XOR)
21958 return false;
21959 if (isOneConstant(Op.getOperand(1)))
21960 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21961 Op.getOperand(0).hasOneUse();
21962 return false;
21963 }
21965 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
21966 bool addTest = true;
21967 SDValue Chain = Op.getOperand(0);
21968 SDValue Cond = Op.getOperand(1);
21969 SDValue Dest = Op.getOperand(2);
21970 SDLoc dl(Op);
21971 SDValue CC;
21972 bool Inverted = false;
21974 if (Cond.getOpcode() == ISD::SETCC) {
21975 // Check for setcc([su]{add,sub,mul}o == 0).
21976 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
21977 isNullConstant(Cond.getOperand(1)) &&
21978 Cond.getOperand(0).getResNo() == 1 &&
21979 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
21980 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
21981 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
21982 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
21983 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
21984 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
21985 Inverted = true;
21986 Cond = Cond.getOperand(0);
21987 } else {
21988 if (SDValue NewCond = LowerSETCC(Cond, DAG))
21989 Cond = NewCond;
21990 }
21991 }
21993 // FIXME: LowerXALUO doesn't handle these!!
21994 else if (Cond.getOpcode() == X86ISD::ADD ||
21995 Cond.getOpcode() == X86ISD::SUB ||
21996 Cond.getOpcode() == X86ISD::SMUL ||
21997 Cond.getOpcode() == X86ISD::UMUL)
21998 Cond = LowerXALUO(Cond, DAG);
22001 // Look past (and (setcc_carry (cmp ...)), 1).
22002 if (Cond.getOpcode() == ISD::AND &&
22003 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22004 isOneConstant(Cond.getOperand(1)))
22005 Cond = Cond.getOperand(0);
22007 // If condition flag is set by a X86ISD::CMP, then use it as the condition
22008 // setting operand in place of the X86ISD::SETCC.
22009 unsigned CondOpcode = Cond.getOpcode();
22010 if (CondOpcode == X86ISD::SETCC ||
22011 CondOpcode == X86ISD::SETCC_CARRY) {
22012 CC = Cond.getOperand(0);
22014 SDValue Cmp = Cond.getOperand(1);
22015 unsigned Opc = Cmp.getOpcode();
22016 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
22017 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
22018 Cond = Cmp;
22019 addTest = false;
22020 } else {
22021 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
22022 default: break;
22023 case X86::COND_O:
22024 case X86::COND_B:
22025 // These can only come from an arithmetic instruction with overflow,
22026 // e.g. SADDO, UADDO.
22027 Cond = Cond.getOperand(1);
22028 addTest = false;
22029 break;
22030 }
22031 }
22032 }
22033 CondOpcode = Cond.getOpcode();
22034 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22035 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22036 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22037 SDValue Value;
22038 X86::CondCode X86Cond;
22039 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22041 if (Inverted)
22042 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
22044 CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22045 addTest = false;
22046 } else {
22047 unsigned CondOpc;
22048 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
22049 SDValue Cmp = Cond.getOperand(0).getOperand(1);
22050 if (CondOpc == ISD::OR) {
22051 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
22052 // two branches instead of an explicit OR instruction with a
22053 // separate test.
22054 if (Cmp == Cond.getOperand(1).getOperand(1) &&
22055 isX86LogicalCmp(Cmp)) {
22056 CC = Cond.getOperand(0).getOperand(0);
22057 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22058 Chain, Dest, CC, Cmp);
22059 CC = Cond.getOperand(1).getOperand(0);
22060 Cond = Cmp;
22061 addTest = false;
22062 }
22063 } else { // ISD::AND
22064 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
22065 // two branches instead of an explicit AND instruction with a
22066 // separate test. However, we only do this if this block doesn't
22067 // have a fall-through edge, because this requires an explicit
22068 // jmp when the condition is false.
22069 if (Cmp == Cond.getOperand(1).getOperand(1) &&
22070 isX86LogicalCmp(Cmp) &&
22071 Op.getNode()->hasOneUse()) {
22072 X86::CondCode CCode0 =
22073 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22074 CCode0 = X86::GetOppositeBranchCondition(CCode0);
22075 CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
22076 SDNode *User = *Op.getNode()->use_begin();
22077 // Look for an unconditional branch following this conditional branch.
22078 // We need this because we need to reverse the successors in order
22079 // to implement FCMP_OEQ.
22080 if (User->getOpcode() == ISD::BR) {
22081 SDValue FalseBB = User->getOperand(1);
22082 SDNode *NewBR =
22083 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22084 assert(NewBR == User);
22085 (void)NewBR;
22086 Dest = FalseBB;
22088 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain,
22089 Dest, CC, Cmp);
22090 X86::CondCode CCode1 =
22091 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
22092 CCode1 = X86::GetOppositeBranchCondition(CCode1);
22093 CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
22094 Cond = Cmp;
22095 addTest = false;
22096 }
22097 }
22099 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
22100 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
22101 // It should be transformed during dag combiner, except when the condition
22102 // is set by an arithmetic-with-overflow node.
22103 X86::CondCode CCode =
22104 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22105 CCode = X86::GetOppositeBranchCondition(CCode);
22106 CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22107 Cond = Cond.getOperand(0).getOperand(1);
22108 addTest = false;
22109 } else if (Cond.getOpcode() == ISD::SETCC &&
22110 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
22111 // For FCMP_OEQ, we can emit
22112 // two branches instead of an explicit AND instruction with a
22113 // separate test. However, we only do this if this block doesn't
22114 // have a fall-through edge, because this requires an explicit
22115 // jmp when the condition is false.
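// Concretely: emit JNE to the false block, then JP to the false block, so
// the original target is only reached when the compare is ordered and equal.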
22116 if (Op.getNode()->hasOneUse()) {
22117 SDNode *User = *Op.getNode()->use_begin();
22118 // Look for an unconditional branch following this conditional branch.
22119 // We need this because we need to reverse the successors in order
22120 // to implement FCMP_OEQ.
22121 if (User->getOpcode() == ISD::BR) {
22122 SDValue FalseBB = User->getOperand(1);
22123 SDNode *NewBR =
22124 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22125 assert(NewBR == User);
22126 (void)NewBR;
22127 Dest = FalseBB;
22129 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22130 Cond.getOperand(0), Cond.getOperand(1));
22131 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22132 CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22133 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22134 Chain, Dest, CC, Cmp);
22135 CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22136 Cond = Cmp;
22137 addTest = false;
22138 }
22139 }
22140 } else if (Cond.getOpcode() == ISD::SETCC &&
22141 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
22142 // For FCMP_UNE, we can emit
22143 // two branches instead of an explicit OR instruction with a
22144 // separate test.
22145 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22146 Cond.getOperand(0), Cond.getOperand(1));
22147 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22148 CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22149 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22150 Chain, Dest, CC, Cmp);
22151 CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22152 Cond = Cmp;
22153 addTest = false;
22154 }
22156 if (addTest) {
22158 // Look past the truncate if the high bits are known zero.
22159 if (isTruncWithZeroHighBitsInput(Cond, DAG))
22160 Cond = Cond.getOperand(0);
22162 // We know the result of AND is compared against zero. Try to match
22163 // it to BT.
22164 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
22165 SDValue BTCC;
22166 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
22175 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
22176 CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22177 Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
22178 X86Cond, dl, DAG);
22179 }
22180 Cond = ConvertCmpIfNecessary(Cond, DAG);
22181 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22182 Chain, Dest, CC, Cond);
22183 }
22185 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
22186 // Calls to _alloca are needed to probe the stack when allocating more than 4k
22187 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
22188 // that the guard pages used by the OS virtual memory manager are allocated in
22189 // correct sequence.
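// E.g. a dynamic alloca of, say, 16K on Windows lowers to WIN_ALLOCA,
// whose expansion probes the stack at 4K strides before the final pointer
// adjustment, keeping the guard-page mechanism intact.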
22190 SDValue
22191 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
22192 SelectionDAG &DAG) const {
22193 MachineFunction &MF = DAG.getMachineFunction();
22194 bool SplitStack = MF.shouldSplitStack();
22195 bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
22196 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
22197 SplitStack || EmitStackProbe;
22198 SDLoc dl(Op);
22200 // Get the inputs.
22201 SDNode *Node = Op.getNode();
22202 SDValue Chain = Op.getOperand(0);
22203 SDValue Size = Op.getOperand(1);
22204 unsigned Align = Op.getConstantOperandVal(2);
22205 EVT VT = Node->getValueType(0);
22207 // Chain the dynamic stack allocation so that it doesn't modify the stack
22208 // pointer when other instructions are using the stack.
22209 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
22211 bool Is64Bit = Subtarget.is64Bit();
22212 MVT SPTy = getPointerTy(DAG.getDataLayout());
22214 SDValue Result;
22215 if (!Lower) {
22216 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22217 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
22218 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
22219 " not tell us which reg is the stack pointer!");
22221 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
22222 Chain = SP.getValue(1);
22223 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
22224 unsigned StackAlign = TFI.getStackAlignment();
22225 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
22226 if (Align > StackAlign)
22227 Result = DAG.getNode(ISD::AND, dl, VT, Result,
22228 DAG.getConstant(-(uint64_t)Align, dl, VT));
22229 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
22230 } else if (SplitStack) {
22231 MachineRegisterInfo &MRI = MF.getRegInfo();
22233 if (Is64Bit) {
22234 // The 64-bit implementation of segmented stacks needs to clobber both r10
22235 // and r11. This makes it impossible to use it along with nested parameters.
22236 const Function &F = MF.getFunction();
22237 for (const auto &A : F.args()) {
22238 if (A.hasNestAttr())
22239 report_fatal_error("Cannot use segmented stacks with functions that "
22240 "have nested arguments.");
22244 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
22245 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
22246 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
22247 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
22248 DAG.getRegister(Vreg, SPTy));
22249 } else {
22250 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
22251 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
22252 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
22254 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
22255 Register SPReg = RegInfo->getStackRegister();
22256 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
22257 Chain = SP.getValue(1);
22259 if (Align) {
22260 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
22261 DAG.getConstant(-(uint64_t)Align, dl, VT));
22262 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
22263 }
22265 Result = SP;
22266 }
22268 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
22269 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
22271 SDValue Ops[2] = {Result, Chain};
22272 return DAG.getMergeValues(Ops, dl);
22275 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
22276 MachineFunction &MF = DAG.getMachineFunction();
22277 auto PtrVT = getPointerTy(MF.getDataLayout());
22278 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
22280 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
22281 SDLoc DL(Op);
22283 if (!Subtarget.is64Bit() ||
22284 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
22285 // vastart just stores the address of the VarArgsFrameIndex slot into the
22286 // memory location argument.
22287 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
22288 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
22289 MachinePointerInfo(SV));
22290 }
22292 // __va_list_tag:
22293 // gp_offset (0 - 6 * 8)
22294 // fp_offset (48 - 48 + 8 * 16)
22295 // overflow_arg_area (point to parameters coming in memory).
22296 // reg_save_area
22297 SmallVector<SDValue, 8> MemOps;
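// For the SysV x86-64 ABI the four stores below write, in order: gp_offset
// (8 bytes per integer register already consumed), fp_offset (48 plus 16
// per XMM register consumed), the overflow_arg_area pointer at byte 8, and
// reg_save_area at byte 16 (LP64 layout).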
22298 SDValue FIN = Op.getOperand(1);
22299 // Store gp_offset.
22300 SDValue Store = DAG.getStore(
22301 Op.getOperand(0), DL,
22302 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
22303 MachinePointerInfo(SV));
22304 MemOps.push_back(Store);
22306 // Store fp_offset.
22307 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
22308 Store = DAG.getStore(
22309 Op.getOperand(0), DL,
22310 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
22311 MachinePointerInfo(SV, 4));
22312 MemOps.push_back(Store);
22314 // Store ptr to overflow_arg_area
22315 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
22316 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
22317 Store =
22318 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
22319 MemOps.push_back(Store);
22321 // Store ptr to reg_save_area.
22322 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
22323 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
22324 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
22325 Store = DAG.getStore(
22326 Op.getOperand(0), DL, RSFIN, FIN,
22327 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
22328 MemOps.push_back(Store);
22329 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
22332 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
22333 assert(Subtarget.is64Bit() &&
22334 "LowerVAARG only handles 64-bit va_arg!");
22335 assert(Op.getNumOperands() == 4);
22337 MachineFunction &MF = DAG.getMachineFunction();
22338 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
22339 // The Win64 ABI uses char* instead of a structure.
22340 return DAG.expandVAArg(Op.getNode());
22342 SDValue Chain = Op.getOperand(0);
22343 SDValue SrcPtr = Op.getOperand(1);
22344 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
22345 unsigned Align = Op.getConstantOperandVal(3);
22346 SDLoc dl(Op);
22348 EVT ArgVT = Op.getNode()->getValueType(0);
22349 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
22350 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
22351 uint8_t ArgMode = 0;
22353 // Decide which area this value should be read from.
22354 // TODO: Implement the AMD64 ABI in its entirety. This simple
22355 // selection mechanism works only for the basic types.
22356 if (ArgVT == MVT::f80) {
22357 llvm_unreachable("va_arg for f80 not yet implemented");
22358 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
22359 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
22360 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
22361 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
22363 llvm_unreachable("Unhandled argument type in LowerVAARG");
22364 }
22366 if (ArgMode == 2) {
22367 // Sanity Check: Make sure using fp_offset makes sense.
22368 assert(!Subtarget.useSoftFloat() &&
22369 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
22370 Subtarget.hasSSE1());
22371 }
22373 // Insert VAARG_64 node into the DAG
22374 // VAARG_64 returns two values: Variable Argument Address, Chain
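// E.g. va_arg(ap, double) passes ArgSize=8 and ArgMode=2; the expansion of
// the pseudo then reads fp_offset to decide between the register save area
// and overflow_arg_area.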
22375 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
22376 DAG.getConstant(ArgMode, dl, MVT::i8),
22377 DAG.getConstant(Align, dl, MVT::i32)};
22378 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
22379 SDValue VAARG = DAG.getMemIntrinsicNode(
22380 X86ISD::VAARG_64, dl,
22381 VTs, InstOps, MVT::i64,
22382 MachinePointerInfo(SV),
22383 /*Align=*/0,
22384 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
22385 Chain = VAARG.getValue(1);
22387 // Load the next argument and return it
22388 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
22391 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
22392 SelectionDAG &DAG) {
22393 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
22394 // where a va_list is still an i8*.
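// Hence the 24-byte copy below: two i32 offsets plus the two 8-byte area
// pointers of the __va_list_tag structure.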
22395 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
22396 if (Subtarget.isCallingConvWin64(
22397 DAG.getMachineFunction().getFunction().getCallingConv()))
22398 // Probably a Win64 va_copy.
22399 return DAG.expandVACopy(Op.getNode());
22401 SDValue Chain = Op.getOperand(0);
22402 SDValue DstPtr = Op.getOperand(1);
22403 SDValue SrcPtr = Op.getOperand(2);
22404 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
22405 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
22407 SDLoc DL(Op);
22408 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
22409 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
22410 /*AlwaysInline=*/false, /*isTailCall=*/false,
22411 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
22412 }
22414 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
22415 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
22416 switch (Opc) {
22417 case ISD::SHL:
22418 case X86ISD::VSHL:
22419 case X86ISD::VSHLI:
22420 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
22421 case ISD::SRL:
22422 case X86ISD::VSRL:
22423 case X86ISD::VSRLI:
22424 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
22425 case ISD::SRA:
22426 case X86ISD::VSRA:
22427 case X86ISD::VSRAI:
22428 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
22429 }
22430 llvm_unreachable("Unknown target vector shift node");
22431 }
22433 /// Handle vector element shifts where the shift amount is a constant.
22434 /// Takes immediate version of shift as input.
22435 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
22436 SDValue SrcOp, uint64_t ShiftAmt,
22437 SelectionDAG &DAG) {
22438 MVT ElementType = VT.getVectorElementType();
22440 // Bitcast the source vector to the output type, this is mainly necessary for
22441 // vXi8/vXi64 shifts.
22442 if (VT != SrcOp.getSimpleValueType())
22443 SrcOp = DAG.getBitcast(VT, SrcOp);
22445 // Fold this packed shift into its first operand if ShiftAmt is 0.
22446 if (!ShiftAmt)
22447 return SrcOp;
22449 // Check for ShiftAmt >= element width
22450 if (ShiftAmt >= ElementType.getSizeInBits()) {
22451 if (Opc == X86ISD::VSRAI)
22452 ShiftAmt = ElementType.getSizeInBits() - 1;
22453 else
22454 return DAG.getConstant(0, dl, VT);
22455 }
22457 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
22458 && "Unknown target vector shift-by-constant node");
22460 // Fold this packed vector shift into a build vector if SrcOp is a
22461 // vector of Constants or UNDEFs.
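// E.g. (VSHLI <1, 2, undef, 4>, 3) folds to <8, 16, undef, 32> here without
// emitting any shift; oversized SRA amounts were already clamped above to
// BitWidth - 1.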
22462 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
22463 SmallVector<SDValue, 8> Elts;
22464 unsigned NumElts = SrcOp->getNumOperands();
22466 switch (Opc) {
22467 default: llvm_unreachable("Unknown opcode!");
22468 case X86ISD::VSHLI:
22469 for (unsigned i = 0; i != NumElts; ++i) {
22470 SDValue CurrentOp = SrcOp->getOperand(i);
22471 if (CurrentOp->isUndef()) {
22472 Elts.push_back(CurrentOp);
22473 continue;
22474 }
22475 auto *ND = cast<ConstantSDNode>(CurrentOp);
22476 const APInt &C = ND->getAPIntValue();
22477 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
22478 }
22479 break;
22480 case X86ISD::VSRLI:
22481 for (unsigned i = 0; i != NumElts; ++i) {
22482 SDValue CurrentOp = SrcOp->getOperand(i);
22483 if (CurrentOp->isUndef()) {
22484 Elts.push_back(CurrentOp);
22485 continue;
22486 }
22487 auto *ND = cast<ConstantSDNode>(CurrentOp);
22488 const APInt &C = ND->getAPIntValue();
22489 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
22490 }
22491 break;
22492 case X86ISD::VSRAI:
22493 for (unsigned i = 0; i != NumElts; ++i) {
22494 SDValue CurrentOp = SrcOp->getOperand(i);
22495 if (CurrentOp->isUndef()) {
22496 Elts.push_back(CurrentOp);
22497 continue;
22498 }
22499 auto *ND = cast<ConstantSDNode>(CurrentOp);
22500 const APInt &C = ND->getAPIntValue();
22501 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
22502 }
22503 break;
22504 }
22506 return DAG.getBuildVector(VT, dl, Elts);
22507 }
22509 return DAG.getNode(Opc, dl, VT, SrcOp,
22510 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
22513 /// Handle vector element shifts where the shift amount may or may not be a
22514 /// constant. Takes immediate version of shift as input.
22515 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
22516 SDValue SrcOp, SDValue ShAmt,
22517 const X86Subtarget &Subtarget,
22518 SelectionDAG &DAG) {
22519 MVT SVT = ShAmt.getSimpleValueType();
22520 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
22522 // Catch shift-by-constant.
22523 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
22524 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
22525 CShAmt->getZExtValue(), DAG);
22527 // Change opcode to non-immediate version.
22528 Opc = getTargetVShiftUniformOpcode(Opc, true);
22530 // Need to build a vector containing shift amount.
22531 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
22532 // +====================+============+=======================================+
22533 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
22534 // +====================+============+=======================================+
22535 // | i64 | Yes, No | Use ShAmt as lowest elt |
22536 // | i32 | Yes | zero-extend in-reg |
22537 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
22538 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
22539 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
22540 // +====================+============+=======================================+
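// E.g. an i16/i32 amount without SSE4.1 takes the last row: build
// (v4i32 ShAmt, 0, undef, undef), of which the packed shift instruction
// only reads the low 64 bits.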
22542 if (SVT == MVT::i64)
22543 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
22544 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
22545 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
22546 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
22547 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
22548 ShAmt = ShAmt.getOperand(0);
22549 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
22550 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
22551 if (Subtarget.hasSSE41())
22552 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22553 MVT::v2i64, ShAmt);
22555 SDValue ByteShift = DAG.getTargetConstant(
22556 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
22557 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
22558 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22559 ByteShift);
22560 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22561 ByteShift);
22562 }
22563 } else if (Subtarget.hasSSE41() &&
22564 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22565 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
22566 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22567 MVT::v2i64, ShAmt);
22568 } else {
22569 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
22570 DAG.getUNDEF(SVT)};
22571 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
22572 }
22574 // The return type has to be a 128-bit type with the same element
22575 // type as the input type.
22576 MVT EltVT = VT.getVectorElementType();
22577 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
22579 ShAmt = DAG.getBitcast(ShVT, ShAmt);
22580 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
22583 /// Return Mask with the necessary casting or extending
22584 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
22585 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
22586 const X86Subtarget &Subtarget, SelectionDAG &DAG,
22587 const SDLoc &dl) {
22589 if (isAllOnesConstant(Mask))
22590 return DAG.getConstant(1, dl, MaskVT);
22591 if (X86::isZeroNode(Mask))
22592 return DAG.getConstant(0, dl, MaskVT);
22594 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
22596 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
22597 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
22598 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
22599 // In 32-bit mode, bitcasting an i64 is illegal; extend/split it instead.
22600 SDValue Lo, Hi;
22601 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22602 DAG.getConstant(0, dl, MVT::i32));
22603 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22604 DAG.getConstant(1, dl, MVT::i32));
22606 Lo = DAG.getBitcast(MVT::v32i1, Lo);
22607 Hi = DAG.getBitcast(MVT::v32i1, Hi);
22609 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
22610 }
22611 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
22612 Mask.getSimpleValueType().getSizeInBits());
22613 // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
22614 // are extracted by the EXTRACT_SUBVECTOR below.
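// E.g. an i8 mask used with a v4i1 MaskVT is bitcast to v8i1 and its low
// four bits extracted; constant all-ones/all-zeros masks were folded above.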
22615 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
22616 DAG.getBitcast(BitcastVT, Mask),
22617 DAG.getIntPtrConstant(0, dl));
22621 /// Return (and \p Op, \p Mask) for compare instructions or
22622 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
22623 /// necessary casting or extending for \p Mask when lowering masking intrinsics
22624 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
22625 SDValue PreservedSrc,
22626 const X86Subtarget &Subtarget,
22627 SelectionDAG &DAG) {
22628 MVT VT = Op.getSimpleValueType();
22629 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
22630 unsigned OpcodeSelect = ISD::VSELECT;
22631 SDLoc dl(Op);
22633 if (isAllOnesConstant(Mask))
22634 return Op;
22636 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22638 if (PreservedSrc.isUndef())
22639 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22640 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
22643 /// Creates an SDNode for a predicated scalar operation.
22644 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
22645 /// The mask is coming as MVT::i8 and it should be transformed
22646 /// to MVT::v1i1 while lowering masking intrinsics.
22647 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
22648 /// "X86select" instead of "vselect". We just can't create the "vselect" node
22649 /// for a scalar instruction.
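/// E.g. FSETCCM-style compare results are ANDed with the v1i1 mask bit,
/// while other scalar ops select between Op and PreservedSrc via
/// X86ISD::SELECTS.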
22650 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
22651 SDValue PreservedSrc,
22652 const X86Subtarget &Subtarget,
22653 SelectionDAG &DAG) {
22655 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
22656 if (MaskConst->getZExtValue() & 0x1)
22657 return Op;
22659 MVT VT = Op.getSimpleValueType();
22660 SDLoc dl(Op);
22662 assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
22663 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
22664 DAG.getBitcast(MVT::v8i1, Mask),
22665 DAG.getIntPtrConstant(0, dl));
22666 if (Op.getOpcode() == X86ISD::FSETCCM ||
22667 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
22668 Op.getOpcode() == X86ISD::VFPCLASSS)
22669 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
22671 if (PreservedSrc.isUndef())
22672 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22673 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
22676 static int getSEHRegistrationNodeSize(const Function *Fn) {
22677 if (!Fn->hasPersonalityFn())
22678 report_fatal_error(
22679 "querying registration node size for function without personality");
22680 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
22681 // WinEHStatePass for the full struct definition.
22682 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
22683 case EHPersonality::MSVC_X86SEH: return 24;
22684 case EHPersonality::MSVC_CXX: return 16;
22685 default: break;
22686 }
22687 report_fatal_error(
22688 "can only recover FP for 32-bit MSVC EH personality functions");
22691 /// When the MSVC runtime transfers control to us, either to an outlined
22692 /// function or when returning to a parent frame after catching an exception, we
22693 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
22694 /// Here's the math:
22695 /// RegNodeBase = EntryEBP - RegNodeSize
22696 /// ParentFP = RegNodeBase - ParentFrameOffset
22697 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
22698 /// subtracting the offset (negative on x86) takes us back to the parent FP.
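/// Worked example (offsets illustrative): with the 32-bit MSVC C++
/// personality, RegNodeSize is 16, so a ParentFrameOffset of -8 gives
/// ParentFP = (EntryEBP - 16) - (-8) = EntryEBP - 8.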
22699 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
22700 SDValue EntryEBP) {
22701 MachineFunction &MF = DAG.getMachineFunction();
22702 SDLoc dl;
22704 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22705 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
22707 // It's possible that the parent function no longer has a personality function
22708 // if the exceptional code was optimized away, in which case we just return
22709 // the incoming EBP.
22710 if (!Fn->hasPersonalityFn())
22711 return EntryEBP;
22713 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
22714 // registration, or the .set_setframe offset.
22715 MCSymbol *OffsetSym =
22716 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
22717 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
22718 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
22719 SDValue ParentFrameOffset =
22720 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
22722 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
22723 // prologue to RBP in the parent function.
22724 const X86Subtarget &Subtarget =
22725 static_cast<const X86Subtarget &>(DAG.getSubtarget());
22726 if (Subtarget.is64Bit())
22727 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
22729 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
22730 // RegNodeBase = EntryEBP - RegNodeSize
22731 // ParentFP = RegNodeBase - ParentFrameOffset
22732 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
22733 DAG.getConstant(RegNodeSize, dl, PtrVT));
22734 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
22737 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
22738 SelectionDAG &DAG) const {
22739 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
22740 auto isRoundModeCurDirection = [](SDValue Rnd) {
22741 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
22742 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
22744 return false;
22745 };
22746 auto isRoundModeSAE = [](SDValue Rnd) {
22747 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
22748 unsigned RC = C->getZExtValue();
22749 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
22750 // Clear the NO_EXC bit and check remaining bits.
22751 RC ^= X86::STATIC_ROUNDING::NO_EXC;
22752 // As a convenience we allow no other bits or explicitly
22753 // current direction.
22754 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
22755 }
22756 }
22758 return false;
22759 };
22760 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
22761 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
22762 RC = C->getZExtValue();
22763 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
22764 // Clear the NO_EXC bit and check remaining bits.
22765 RC ^= X86::STATIC_ROUNDING::NO_EXC;
22766 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
22767 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
22768 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
22769 RC == X86::STATIC_ROUNDING::TO_ZERO;
22770 }
22771 }
22772 return false;
22773 };
22775 SDLoc dl(Op);
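// E.g. a rounding operand of 9 (NO_EXC | TO_NEG_INF) satisfies
// isRoundModeSAEToX with RC == TO_NEG_INF, a plain 8 (NO_EXC) satisfies
// isRoundModeSAE, and 4 (CUR_DIRECTION) requests the MXCSR default.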
22777 unsigned IntNo = Op.getConstantOperandVal(0);
22778 MVT VT = Op.getSimpleValueType();
22779 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
22780 if (IntrData) {
22781 switch(IntrData->Type) {
22782 case INTR_TYPE_1OP: {
22783 // We specify 2 possible opcodes for intrinsics with rounding modes.
22784 // First, we check if the intrinsic may have non-default rounding mode,
22785 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22786 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22787 if (IntrWithRoundingModeOpcode != 0) {
22788 SDValue Rnd = Op.getOperand(2);
22789 unsigned RC = 0;
22790 if (isRoundModeSAEToX(Rnd, RC))
22791 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22792 Op.getOperand(1),
22793 DAG.getTargetConstant(RC, dl, MVT::i32));
22794 if (!isRoundModeCurDirection(Rnd))
22795 return SDValue();
22796 }
22797 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
22799 case INTR_TYPE_1OP_SAE: {
22800 SDValue Sae = Op.getOperand(2);
22802 unsigned Opc;
22803 if (isRoundModeCurDirection(Sae))
22804 Opc = IntrData->Opc0;
22805 else if (isRoundModeSAE(Sae))
22806 Opc = IntrData->Opc1;
22807 else
22808 return SDValue();
22810 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
22811 }
22812 case INTR_TYPE_2OP: {
22813 SDValue Src2 = Op.getOperand(2);
22815 // We specify 2 possible opcodes for intrinsics with rounding modes.
22816 // First, we check if the intrinsic may have non-default rounding mode,
22817 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22818 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22819 if (IntrWithRoundingModeOpcode != 0) {
22820 SDValue Rnd = Op.getOperand(3);
22821 unsigned RC = 0;
22822 if (isRoundModeSAEToX(Rnd, RC))
22823 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22824 Op.getOperand(1), Src2,
22825 DAG.getTargetConstant(RC, dl, MVT::i32));
22826 if (!isRoundModeCurDirection(Rnd))
22827 return SDValue();
22828 }
22830 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22831 Op.getOperand(1), Src2);
22833 case INTR_TYPE_2OP_SAE: {
22834 SDValue Sae = Op.getOperand(3);
22836 unsigned Opc;
22837 if (isRoundModeCurDirection(Sae))
22838 Opc = IntrData->Opc0;
22839 else if (isRoundModeSAE(Sae))
22840 Opc = IntrData->Opc1;
22841 else
22842 return SDValue();
22844 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
22845 Op.getOperand(2));
22846 }
22847 case INTR_TYPE_3OP:
22848 case INTR_TYPE_3OP_IMM8: {
22849 SDValue Src1 = Op.getOperand(1);
22850 SDValue Src2 = Op.getOperand(2);
22851 SDValue Src3 = Op.getOperand(3);
22853 // We specify 2 possible opcodes for intrinsics with rounding modes.
22854 // First, we check if the intrinsic may have non-default rounding mode,
22855 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
22856 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22857 if (IntrWithRoundingModeOpcode != 0) {
22858 SDValue Rnd = Op.getOperand(4);
22859 unsigned RC = 0;
22860 if (isRoundModeSAEToX(Rnd, RC))
22861 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22862 Src1, Src2, Src3,
22863 DAG.getTargetConstant(RC, dl, MVT::i32));
22864 if (!isRoundModeCurDirection(Rnd))
22865 return SDValue();
22866 }
22868 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22869 Src1, Src2, Src3);
22870 }
22871 case INTR_TYPE_4OP:
22872 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
22873 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
22874 case INTR_TYPE_1OP_MASK: {
22875 SDValue Src = Op.getOperand(1);
22876 SDValue PassThru = Op.getOperand(2);
22877 SDValue Mask = Op.getOperand(3);
22878 // We add rounding mode to the Node when
22879 // - RC Opcode is specified and
22880 // - RC is not "current direction".
22881 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22882 if (IntrWithRoundingModeOpcode != 0) {
22883 SDValue Rnd = Op.getOperand(4);
22884 unsigned RC = 0;
22885 if (isRoundModeSAEToX(Rnd, RC))
22886 return getVectorMaskingNode(
22887 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22888 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
22889 Mask, PassThru, Subtarget, DAG);
22890 if (!isRoundModeCurDirection(Rnd))
22891 return SDValue();
22892 }
22893 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
22894 Mask, PassThru, Subtarget, DAG);
22896 case INTR_TYPE_1OP_MASK_SAE: {
22897 SDValue Src = Op.getOperand(1);
22898 SDValue PassThru = Op.getOperand(2);
22899 SDValue Mask = Op.getOperand(3);
22900 SDValue Rnd = Op.getOperand(4);
22902 unsigned Opc;
22903 if (isRoundModeCurDirection(Rnd))
22904 Opc = IntrData->Opc0;
22905 else if (isRoundModeSAE(Rnd))
22906 Opc = IntrData->Opc1;
22907 else
22908 return SDValue();
22910 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src),
22911 Mask, PassThru, Subtarget, DAG);
22913 case INTR_TYPE_SCALAR_MASK: {
22914 SDValue Src1 = Op.getOperand(1);
22915 SDValue Src2 = Op.getOperand(2);
22916 SDValue passThru = Op.getOperand(3);
22917 SDValue Mask = Op.getOperand(4);
22918 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22919 // There are 2 kinds of intrinsics in this group:
22920 // (1) With suppress-all-exceptions (sae) or rounding mode- 6 operands
22921 // (2) With rounding mode and sae - 7 operands.
22922 bool HasRounding = IntrWithRoundingModeOpcode != 0;
22923 if (Op.getNumOperands() == (5U + HasRounding)) {
22924 if (HasRounding) {
22925 SDValue Rnd = Op.getOperand(5);
22926 unsigned RC = 0;
22927 if (isRoundModeSAEToX(Rnd, RC))
22928 return getScalarMaskingNode(
22929 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
22930 DAG.getTargetConstant(RC, dl, MVT::i32)),
22931 Mask, passThru, Subtarget, DAG);
22932 if (!isRoundModeCurDirection(Rnd))
22933 return SDValue();
22934 }
22935 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
22936 Src2),
22937 Mask, passThru, Subtarget, DAG);
22938 }
22940 assert(Op.getNumOperands() == (6U + HasRounding) &&
22941 "Unexpected intrinsic form");
22942 SDValue RoundingMode = Op.getOperand(5);
22943 unsigned Opc = IntrData->Opc0;
22944 if (HasRounding) {
22945 SDValue Sae = Op.getOperand(6);
22946 if (isRoundModeSAE(Sae))
22947 Opc = IntrWithRoundingModeOpcode;
22948 else if (!isRoundModeCurDirection(Sae))
22949 return SDValue();
22950 }
22951 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
22952 Src2, RoundingMode),
22953 Mask, passThru, Subtarget, DAG);
22955 case INTR_TYPE_SCALAR_MASK_RND: {
22956 SDValue Src1 = Op.getOperand(1);
22957 SDValue Src2 = Op.getOperand(2);
22958 SDValue passThru = Op.getOperand(3);
22959 SDValue Mask = Op.getOperand(4);
22960 SDValue Rnd = Op.getOperand(5);
22962 SDValue NewOp;
22963 unsigned RC = 0;
22964 if (isRoundModeCurDirection(Rnd))
22965 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22966 else if (isRoundModeSAEToX(Rnd, RC))
22967 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22968 DAG.getTargetConstant(RC, dl, MVT::i32));
22969 else
22970 return SDValue();
22972 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
22974 case INTR_TYPE_SCALAR_MASK_SAE: {
22975 SDValue Src1 = Op.getOperand(1);
22976 SDValue Src2 = Op.getOperand(2);
22977 SDValue passThru = Op.getOperand(3);
22978 SDValue Mask = Op.getOperand(4);
22979 SDValue Sae = Op.getOperand(5);
22980 unsigned Opc;
22981 if (isRoundModeCurDirection(Sae))
22982 Opc = IntrData->Opc0;
22983 else if (isRoundModeSAE(Sae))
22984 Opc = IntrData->Opc1;
22988 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22989 Mask, passThru, Subtarget, DAG);
    case INTR_TYPE_2OP_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue NewOp;
      if (IntrData->Opc1 != 0) {
        SDValue Rnd = Op.getOperand(5);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                              DAG.getTargetConstant(RC, dl, MVT::i32));
        else if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      if (!NewOp)
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Sae = Op.getOperand(6);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case BLENDV: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);

      EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
      Src3 = DAG.getBitcast(MaskVT, Src3);

      // Reverse the operands to match VSELECT order.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
    }
    case VPERM_2OP: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);

      // Swap Src1 and Src2 in the node creation.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
    }
    case IFMA_OP:
      // NOTE: We need to swizzle the operands to pass the multiply operands
      // first.
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
    case FPCLASSS: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Imm = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
      SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
                                                 Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                FPclassMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }
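    // Illustrative note (not in the original source): the INSERT_SUBVECTOR
    // into an all-zero v8i1 pads the single compare bit, so the bitcast
    // yields exactly 0x01 or 0x00 in i8 rather than 7 undefined upper bits.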
    case CMP_MASK_CC: {
      MVT MaskVT = Op.getSimpleValueType();
      SDValue CC = Op.getOperand(3);
      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(4);
        if (isRoundModeSAE(Sae))
          return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
                             Op.getOperand(2), CC, Sae);
        if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      return DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                         Op.getOperand(2), CC);
    }
    case CMP_MASK_SCALAR_CC: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue CC = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      SDValue Cmp;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      if (!Cmp.getNode())
        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);

      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
                                             Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                CmpMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }
    case COMI: { // Comparison intrinsics
      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
      SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
      SDValue SetCC;
      switch (CC) {
      case ISD::SETEQ: { // (ZF = 0 and PF = 0)
        SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
        SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
        break;
      }
      case ISD::SETNE: { // (ZF = 1 or PF = 1)
        SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
        SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
        break;
      }
      case ISD::SETGT: // (CF = 0 and ZF = 0)
        SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
        break;
      case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
        SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
        break;
      }
      case ISD::SETGE: // CF = 0
        SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
        break;
      case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
        SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
        break;
      default:
        llvm_unreachable("Unexpected illegal condition!");
      }
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
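    // Illustrative note (not in the original source): COMIS* sets ZF/PF/CF
    // like an unsigned compare, and an unordered result sets all three, so
    // SETEQ must also check PF = 0 to keep NaN operands from comparing equal
    // (e.g. comieq(NaN, NaN) must return 0, not 1).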
    case COMI_RM: { // Comparison intrinsics with Sae
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      unsigned CondVal = Op.getConstantOperandVal(3);
      SDValue Sae = Op.getOperand(4);

      SDValue FCmp;
      if (isRoundModeCurDirection(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8));
      else if (isRoundModeSAE(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
      else
        return SDValue();
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                                DAG.getConstant(0, dl, MVT::v16i1),
                                FCmp, DAG.getIntPtrConstant(0, dl));
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
                         DAG.getBitcast(MVT::i16, Ins));
    }
    case VSHIFT:
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 Op.getOperand(1), Op.getOperand(2), Subtarget,
                                 DAG);
    case COMPRESS_EXPAND_IN_REG: {
      SDValue Mask = Op.getOperand(3);
      SDValue DataToCompress = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
        return Op.getOperand(1);

      // Avoid false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, VT);

      return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
                         Mask);
    }
    case FIXUPIMM:
    case FIXUPIMM_MASKZ: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue Imm = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Passthru = (IntrData->Type == FIXUPIMM)
                             ? Src1
                             : getZeroVector(VT, Subtarget, DAG, dl);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);

      if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
        return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);

      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
    }
    case ROUNDP: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto Round = cast<ConstantSDNode>(Op.getOperand(2));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), RoundingMode);
    }
    case ROUNDS: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto Round = cast<ConstantSDNode>(Op.getOperand(3));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2), RoundingMode);
    }
    case BEXTRI: {
      assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");

      // The control is a TargetConstant, but we need to convert it to a
      // ConstantSDNode.
      uint64_t Imm = Op.getConstantOperandVal(2);
      SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Control);
    }
    // ADC/ADCX/SBB
    case ADX: {
      SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
      SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);

      SDValue Res;
      // If the carry in is zero, then we should just use ADD/SUB instead of
      // ADC/SBB.
      if (isNullConstant(Op.getOperand(1))) {
        Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3));
      } else {
        SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
                                    DAG.getConstant(-1, dl, MVT::i8));
        Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3), GenCF.getValue(1));
      }
      SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
      SDValue Results[] = { SetCC, Res };
      return DAG.getMergeValues(Results, dl);
    }
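    // Illustrative note (not in the original source): the ADD with -1
    // materializes the incoming carry in EFLAGS.CF, since CarryIn + 0xFF
    // overflows an i8 exactly when CarryIn is nonzero; the ADC/SBB then
    // consumes that flag through GenCF.getValue(1).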
    case CVTPD2PS_MASK:
    case CVTPD2DQ_MASK:
    case CVTQQ2PS_MASK:
    case TRUNCATE_TO_REG: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
                         Mask);
    }
    case CVTPS2PH_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue Rnd = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
                         PassThru, Mask);
    }
    case CVTNEPS2BF16_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (ISD::isBuildVectorAllOnes(Mask.getNode()))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      // Break false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, PassThru.getValueType());

      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
                         Mask);
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue(); // Don't custom lower most intrinsics.
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
  case Intrinsic::x86_avx512_ktestc_b:
  case Intrinsic::x86_avx512_ktestc_w:
  case Intrinsic::x86_avx512_ktestc_d:
  case Intrinsic::x86_avx512_ktestc_q:
  case Intrinsic::x86_avx512_ktestz_b:
  case Intrinsic::x86_avx512_ktestz_w:
  case Intrinsic::x86_avx512_ktestz_d:
  case Intrinsic::x86_avx512_ktestz_q:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    unsigned TestOpc = X86ISD::PTEST;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx512_ktestc_b:
    case Intrinsic::x86_avx512_ktestc_w:
    case Intrinsic::x86_avx512_ktestc_d:
    case Intrinsic::x86_avx512_ktestc_q:
      // CF = 1
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx512_ktestz_b:
    case Intrinsic::x86_avx512_ktestz_w:
    case Intrinsic::x86_avx512_ktestz_d:
    case Intrinsic::x86_avx512_ktestz_q:
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
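  // Illustrative note (not in the original source): for instance,
  // _mm_testz_si128(a, b) arrives here as llvm.x86.sse41.ptestz and becomes
  // (zext (setcc COND_E, (PTEST a, b))), i.e. it reads ZF after PTEST.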
  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
    SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }
  case Intrinsic::x86_sse42_pcmpistrm128:
  case Intrinsic::x86_sse42_pcmpestrm128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(getGlobalWrapperKind(), dl, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }
  case Intrinsic::x86_seh_lsda: {
    // Compute the symbol for the LSDA. We know it'll get emitted later.
    MachineFunction &MF = DAG.getMachineFunction();
    SDValue Op1 = Op.getOperand(1);
    auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
    MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
        GlobalValue::dropLLVMManglingEscape(Fn->getName()));

    // Generate a simple absolute symbol reference. This intrinsic is only
    // supported on 32-bit Windows, which isn't PIC.
    SDValue Result = DAG.getMCSymbol(LSDASym, VT);
    return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
  }
  case Intrinsic::eh_recoverfp: {
    SDValue FnOp = Op.getOperand(1);
    SDValue IncomingFPOp = Op.getOperand(2);
    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.eh.recoverfp must take a function as the first argument");
    return recoverFramePointer(DAG, Fn, IncomingFPOp);
  }
  case Intrinsic::localaddress: {
    // Returns one of the stack, base, or frame pointer registers, depending on
    // which is used to reference local variables.
    MachineFunction &MF = DAG.getMachineFunction();
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    unsigned Reg;
    if (RegInfo->hasBasePointer(MF))
      Reg = RegInfo->getBaseRegister();
    else { // Handles the SP or FP case.
      bool CantUseFP = RegInfo->needsStackRealignment(MF);
      if (CantUseFP)
        Reg = RegInfo->getPtrSizedStackRegister(MF);
      else
        Reg = RegInfo->getPtrSizedFrameRegister(MF);
    }
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
  }
  case Intrinsic::x86_avx512_vp2intersect_q_512:
  case Intrinsic::x86_avx512_vp2intersect_q_256:
  case Intrinsic::x86_avx512_vp2intersect_q_128:
  case Intrinsic::x86_avx512_vp2intersect_d_512:
  case Intrinsic::x86_avx512_vp2intersect_d_256:
  case Intrinsic::x86_avx512_vp2intersect_d_128: {
    MVT MaskVT = Op.getSimpleValueType();

    SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDLoc DL(Op);

    SDValue Operation =
        DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
                    Op->getOperand(1), Op->getOperand(2));

    SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
                                                 MaskVT, Operation);
    SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
                                                 MaskVT, Operation);
    return DAG.getMergeValues({Result0, Result1}, DL);
  }
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDLoc DL(Op);
    SDValue ShAmt = Op.getOperand(2);
    // If the argument is a constant, convert it to a target constant.
    if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
      ShAmt = DAG.getTargetConstant(C->getZExtValue(), DL, MVT::i32);
      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                         Op.getOperand(0), Op.getOperand(1), ShAmt);
    }

    unsigned NewIntrinsic;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_mmx_pslli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
      break;
    }

    // The vector shift intrinsics with scalars use 32-bit shift amounts but
    // the sse2/mmx shift instructions read 64 bits. Copy the 32 bits to an
    // MMX register.
    ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                       DAG.getConstant(NewIntrinsic, DL, MVT::i32),
                       Op.getOperand(1), ShAmt);
  }
  }
}
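// Illustrative note (not in the original source): a constant shift such as
// _mm_slli_pi16(x, 3) keeps the immediate form above, while a variable
// amount is first moved into an MMX register with MMX_MOVW2D so that the
// psllw-style instruction can read the 64-bit shift operand it expects.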
static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                                 SDValue Src, SDValue Mask, SDValue Base,
                                 SDValue Index, SDValue ScaleOp, SDValue Chain,
                                 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
      VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
}
static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
                             SDValue Src, SDValue Mask, SDValue Base,
                             SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              VT.getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the gather intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
      VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
}
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                              SDValue Src, SDValue Mask, SDValue Base,
                              SDValue Index, SDValue ScaleOp, SDValue Chain,
                              const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              Src.getSimpleValueType().getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the scatter intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
      VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return Res.getValue(1);
}
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Mask, SDValue Base, SDValue Index,
                               SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
      MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
  SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}
/// Handles the lowering of builtin intrinsics with chain that return their
/// value into registers EDX:EAX.
/// If operand SrcReg is a valid register identifier, then operand 2 of N is
/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
/// the handled intrinsics.
/// Returns a Glue value which can be used to add extra copy-from-reg if the
/// expanded intrinsic implicitly defines extra registers (i.e. not just
/// EDX:EAX).
static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
                                           SelectionDAG &DAG,
                                           unsigned TargetOpcode,
                                           unsigned SrcReg,
                                           const X86Subtarget &Subtarget,
                                           SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Glue;

  if (SrcReg) {
    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
    Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
    Glue = Chain.getValue(1);
  }

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue N1Ops[] = {Chain, Glue};
  SDNode *N1 = DAG.getMachineNode(
      TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
  Chain = SDValue(N1, 0);

  // Reads the content of XCR and returns it in registers EDX:EAX.
  SDValue LO, HI;
  if (Subtarget.is64Bit()) {
    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
  Glue = HI.getValue(2);

  if (Subtarget.is64Bit()) {
    // Merge the two 32-bit values into a 64-bit one.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, DL, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return Glue;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
  return Glue;
}
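// Illustrative note (not in the original source): for RDTSC on a 64-bit
// target this helper produces (or (shl RDX, 32), RAX); on a 32-bit target
// the same EDX:EAX pair is combined with an i64 BUILD_PAIR instead.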
/// Handles the lowering of builtin intrinsics that read the time stamp counter
/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
/// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget,
                                    SmallVectorImpl<SDValue> &Results) {
  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
                                             /* NoRegister */0, Subtarget,
                                             Results);
  if (Opcode != X86::RDTSCP)
    return;

  SDValue Chain = Results[1];
  // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
  // the ECX register. Add 'ecx' explicitly to the chain.
  SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
  Results[1] = ecx;
  Results.push_back(ecx.getValue(1));
}
static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  SmallVector<SDValue, 3> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}
static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue RegNode = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EH registrations only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
  EHInfo->EHRegNodeFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}
static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue EHGuard = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EHGuard only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
  EHInfo->EHGuardFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}
/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
                SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
                SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  return SignedSat ?
    DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
    DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
                      SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
                      MachineMemOperand *MMO, SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Val, Ptr, Mask };
  return SignedSat ?
    DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
    DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  unsigned IntNo = Op.getConstantOperandVal(1);
  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData) {
    switch (IntNo) {
    case llvm::Intrinsic::x86_seh_ehregnode:
      return MarkEHRegistrationNode(Op, DAG);
    case llvm::Intrinsic::x86_seh_ehguard:
      return MarkEHGuard(Op, DAG);
    case llvm::Intrinsic::x86_rdpkru: {
      SDLoc dl(Op);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      // Create a RDPKRU node and pass 0 to the ECX parameter.
      return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_wrpkru: {
      SDLoc dl(Op);
      // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
      // to the EDX and ECX parameters.
      return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
                         Op.getOperand(0), Op.getOperand(2),
                         DAG.getConstant(0, dl, MVT::i32),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_flags_read_u32:
    case llvm::Intrinsic::x86_flags_read_u64:
    case llvm::Intrinsic::x86_flags_write_u32:
    case llvm::Intrinsic::x86_flags_write_u64: {
      // We need a frame pointer because this will get lowered to a PUSH/POP
      // sequence.
      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
      MFI.setHasCopyImplyingStackAdjustment(true);
      // Don't do anything here, we will expand these intrinsics out later
      // during FinalizeISel in EmitInstrWithCustomInserter.
      return SDValue();
    }
    case Intrinsic::x86_lwpins32:
    case Intrinsic::x86_lwpins64:
    case Intrinsic::x86_umwait:
    case Intrinsic::x86_tpause: {
      SDLoc dl(Op);
      SDValue Chain = Op->getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_umwait:
        Opcode = X86ISD::UMWAIT;
        break;
      case Intrinsic::x86_tpause:
        Opcode = X86ISD::TPAUSE;
        break;
      case Intrinsic::x86_lwpins32:
      case Intrinsic::x86_lwpins64:
        Opcode = X86ISD::LWPINS;
        break;
      }

      SDValue Operation =
          DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
                      Op->getOperand(3), Op->getOperand(4));
      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    case Intrinsic::x86_enqcmd:
    case Intrinsic::x86_enqcmds: {
      SDLoc dl(Op);
      SDValue Chain = Op.getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic!");
      case Intrinsic::x86_enqcmd:
        Opcode = X86ISD::ENQCMD;
        break;
      case Intrinsic::x86_enqcmds:
        Opcode = X86ISD::ENQCMDS;
        break;
      }
      SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
                                      Op.getOperand(3));
      SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    }
    return SDValue();
  }

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default: llvm_unreachable("Unknown Intrinsic Type");
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, casted to i32.
    SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                     DAG.getConstant(1, dl, Op->getValueType(1)),
                     DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
                     SDValue(Result.getNode(), 1)};
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
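  // Illustrative note (not in the original source): the CMOV keys on CF, so a
  // failed RDRAND (CF = 0) leaves the destination register 0 and isValid
  // folds to 0, while a successful one yields the constant 1.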
  case GATHER_AVX2: {
    SDValue Chain = Op.getOperand(0);
    SDValue Src = Op.getOperand(2);
    SDValue Base = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                             Scale, Chain, Subtarget);
  }
  case GATHER: {
    // gather(v1, mask, index, base, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Src = Op.getOperand(2);
    SDValue Base = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    // scatter(base, mask, index, v1, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Base = Op.getOperand(2);
    SDValue Mask = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Src = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain, Subtarget);
  }
  case PREFETCH: {
    const APInt &HintVal = Op.getConstantOperandAPInt(6);
    assert((HintVal == 2 || HintVal == 3) &&
           "Wrong prefetch hint in intrinsic: should be 2 or 3");
    unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
    SDValue Chain = Op.getOperand(0);
    SDValue Mask = Op.getOperand(2);
    SDValue Index = Op.getOperand(3);
    SDValue Base = Op.getOperand(4);
    SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
                           Subtarget);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC:
  // GetExtended Control Register.
  case XGETBV: {
    SmallVector<SDValue, 2> Results;

    // RDPMC uses ECX to select the index of the performance counter to read.
    // XGETBV uses ECX to select the index of the XCR register to return.
    // The result is stored into registers EDX:EAX.
    expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
                                Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  case TRUNCATE_TO_MEM_VI8:
  case TRUNCATE_TO_MEM_VI16:
  case TRUNCATE_TO_MEM_VI32: {
    SDValue Mask = Op.getOperand(4);
    SDValue DataToTruncate = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);

    MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
    assert(MemIntr && "Expected MemIntrinsicSDNode!");

    EVT MemVT = MemIntr->getMemoryVT();

    uint16_t TruncationOp = IntrData->Opc0;
    switch (TruncationOp) {
    case X86ISD::VTRUNC: {
      if (isAllOnesConstant(Mask)) // return just a truncate store
        return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
                                 MemIntr->getMemOperand());

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

      return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
                                MemIntr->getMemOperand(), true /* truncating */);
    }
    case X86ISD::VTRUNCUS:
    case X86ISD::VTRUNCS: {
      bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
      if (isAllOnesConstant(Mask))
        return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
                               MemIntr->getMemOperand(), DAG);

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

      return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
                                   VMask, MemVT, MemIntr->getMemOperand(), DAG);
    }
    default:
      llvm_unreachable("Unsupported truncstore intrinsic");
    }
  }
  }
}
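// Illustrative note (not in the original source): a masked signed-saturating
// truncating-store builtin (e.g. an avx512 mask.pmovs.*.mem intrinsic) takes
// the VTRUNCS path above and is emitted as a MaskedTruncSStoreSDNode, i.e. a
// saturating truncating store gated per element by the k-mask.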
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}
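// Illustrative note (not in the original source): with frame pointers, each
// saved return address sits one slot above the saved frame pointer, so
// llvm.returnaddress(1) loads from (frameaddress(1) + SlotSize) — an
// RBP-chain walk plus an 8-byte offset on x86-64.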
SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
  return getReturnAddressFrameIndex(DAG);
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  EVT VT = Op.getValueType();

  MFI.setFrameAddressIsTaken(true);

  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind codes
    // simultaneously.
    int FrameAddrIndex = FuncInfo->getFAIndex();
    if (!FrameAddrIndex) {
      // Set up a frame object for the return address.
      unsigned SlotSize = RegInfo->getSlotSize();
      FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
          SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
      FuncInfo->setFAIndex(FrameAddrIndex);
    }
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }

  unsigned FrameReg =
      RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
  SDLoc dl(Op); // FIXME: probably not meaningful
  unsigned Depth = Op.getConstantOperandVal(0);
  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
          (FrameReg == X86::EBP && VT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              const MachineFunction &MF) const {
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();

  Register Reg = StringSwitch<unsigned>(RegName)
                     .Case("esp", X86::ESP)
                     .Case("rsp", X86::RSP)
                     .Case("ebp", X86::EBP)
                     .Case("rbp", X86::RBP)
                     .Default(0);

  if (Reg == X86::EBP || Reg == X86::RBP) {
    if (!TFI.hasFP(MF))
      report_fatal_error("register " + StringRef(RegName) +
                         " is allocatable: function has no frame pointer");
#ifndef NDEBUG
    else {
      const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
      Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
      assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
             "Invalid Frame Register!");
    }
#endif
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
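// Illustrative note (not in the original source): this services IR such as
//   %sp = call i64 @llvm.read_register.i64(metadata !"rsp")
// where the named-register metadata string is resolved by the StringSwitch
// above.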
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}
unsigned X86TargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
    return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;

  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}
unsigned X86TargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Funclet personalities don't use selectors (the runtime does the selection).
  assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
}
bool X86TargetLowering::needsFixedCatchObjects() const {
  return Subtarget.isTargetWin64();
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
  unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
                                                        dl));
  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If the subtarget is not 64bit, we may need the global base reg
  // after isel expand pseudo, i.e., after CGBR pass ran.
  // Therefore, ask for the GlobalBaseReg now, so that the pass
  // inserts the code for us in case we need it.
  // Otherwise, we will end up in a situation where we will
  // reference a virtual register that is not defined!
  if (!Subtarget.is64Bit()) {
    const X86InstrInfo *TII = Subtarget.getInstrInfo();
    (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
  }
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}
SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
                     Op.getOperand(0));
}
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}
SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  if (Subtarget.is64Bit()) {
    SDValue OutChains[6];

    // Large code-model.
    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, dl, MVT::i64));
    OutChains[1] =
        DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
                     /* Alignment = */ 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td.
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, dl, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, dl, MVT::i64));
    OutChains[3] =
        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
                     /* Alignment = */ 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, dl, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20));

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, dl, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 22));

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td.
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      FunctionType *FTy = Func->getFunctionType();
      const AttributeList &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
            auto &DL = DAG.getDataLayout();
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
          }

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
    case CallingConv::Tail:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td.
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, dl, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] =
        DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
                     Trmp, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, dl, MVT::i32));
    OutChains[1] =
        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
                     /* Alignment = */ 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, dl, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 5),
                                /* Alignment = */ 1);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, dl, MVT::i32));
    OutChains[3] =
        DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
                     /* Alignment = */ 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}
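// Illustrative note (not in the original source): on x86-64 the bytes stored
// above assemble into the trampoline
//   49 BB <imm64>   movabsq $fptr, %r11   (offset 0)
//   49 BA <imm64>   movabsq $nest, %r10   (offset 10)
//   49 FF E3        jmpq    *%r11         (offset 20)
// so the nested function receives its static-chain value in R10.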
24518 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
24519 SelectionDAG &DAG) const {
24521 The rounding mode is in bits 11:10 of FPSR, and has the following
24523 00 Round to nearest
24528 FLT_ROUNDS, on the other hand, expects the following:
24535 To perform the conversion, we do:
24536 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
24539 MachineFunction &MF = DAG.getMachineFunction();
24540 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24541 unsigned StackAlignment = TFI.getStackAlignment();
24542 MVT VT = Op.getSimpleValueType();
24545 // Save FP Control Word to stack slot
24546 int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
24547 SDValue StackSlot =
24548 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
24550 MachineMemOperand *MMO =
24551 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
24552 MachineMemOperand::MOStore, 2, 2);
24554 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
24555 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
24556 DAG.getVTList(MVT::Other),
24557 Ops, MVT::i16, MMO);
24559 // Load FP Control Word from stack slot
24561 DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
24563 // Transform as necessary
24565 DAG.getNode(ISD::SRL, DL, MVT::i16,
24566 DAG.getNode(ISD::AND, DL, MVT::i16,
24567 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
24568 DAG.getConstant(11, DL, MVT::i8));
SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, DL, MVT::i16)),
                DAG.getConstant(9, DL, MVT::i8));
SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, DL, MVT::i16)),
                DAG.getConstant(3, DL, MVT::i16));
24582 return DAG.getNode((VT.getSizeInBits() < 16 ?
24583 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
// Split a unary integer op into 2 half-sized ops.
24587 static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
24588 MVT VT = Op.getSimpleValueType();
24589 unsigned NumElems = VT.getVectorNumElements();
24590 unsigned SizeInBits = VT.getSizeInBits();
24591 MVT EltVT = VT.getVectorElementType();
24592 SDValue Src = Op.getOperand(0);
24593 assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
24594 "Src and Op should have the same element type!");
24596 // Extract the Lo/Hi vectors
SDLoc dl(Op);
SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
24599 SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
24601 MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
24602 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24603 DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
24604 DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
24607 // Decompose 256-bit ops into smaller 128-bit ops.
24608 static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
24609 assert(Op.getSimpleValueType().is256BitVector() &&
24610 Op.getSimpleValueType().isInteger() &&
24611 "Only handle AVX 256-bit vector integer operation");
24612 return LowerVectorIntUnary(Op, DAG);
24615 // Decompose 512-bit ops into smaller 256-bit ops.
24616 static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
24617 assert(Op.getSimpleValueType().is512BitVector() &&
24618 Op.getSimpleValueType().isInteger() &&
24619 "Only handle AVX 512-bit vector integer operation");
24620 return LowerVectorIntUnary(Op, DAG);
/// Lower a vector CTLZ using a natively supported vector CTLZ instruction.
//
// i8/i16 vectors are implemented using the dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal, we
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
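// For example, for an i8 element x == 0x10, lzcnt(zext32(x)) == 27 and
// 27 - (32 - 8) == 3 == ctlz(x).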
24629 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
24630 const X86Subtarget &Subtarget) {
24631 assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
24634 MVT EltVT = VT.getVectorElementType();
24635 unsigned NumElems = VT.getVectorNumElements();
24637 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
24638 "Unsupported element type");
// Split the vector; its Lo and Hi parts will be handled in the next iteration.
24641 if (NumElems > 16 ||
24642 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
24643 return LowerVectorIntUnary(Op, DAG);
24645 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
24646 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
24647 "Unsupported value type for operation");
24649 // Use native supported vector instruction vplzcntd.
24650 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
24651 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
24652 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
24653 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
24655 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
24658 // Lower CTLZ using a PSHUFB lookup table implementation.
24659 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
24660 const X86Subtarget &Subtarget,
24661 SelectionDAG &DAG) {
24662 MVT VT = Op.getSimpleValueType();
24663 int NumElts = VT.getVectorNumElements();
24664 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
24665 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
24667 // Per-nibble leading zero PSHUFB lookup table.
24668 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
24669 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
24670 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
24671 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
24673 SmallVector<SDValue, 64> LUTVec;
24674 for (int i = 0; i < NumBytes; ++i)
24675 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
24676 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
// Begin by bitcasting the input to byte vector, then split those bytes
// into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
// If the hi input nibble is zero then we add both results together, otherwise
// we just take the hi result (by masking the lo result to zero before the
// add).
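// For example, byte 0x1f has hi nibble 0x1 (LUT[1] == 3 leading zeros), which
// is non-zero, so ctlz == 3; byte 0x05 has a zero hi nibble, so we add
// LUT[0] == 4 and LUT[5] == 1 to get ctlz == 5.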
24683 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
SDValue Zero = DAG.getConstant(0, DL, CurrVT);

SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
SDValue HiZ;
24690 if (CurrVT.is512BitVector()) {
24691 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24692 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
24693 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
} else {
  HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
}
24698 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
24699 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
24700 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
24701 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
24703 // Merge result back from vXi8 back to VT, working on the lo/hi halves
24704 // of the current vector width in the same way we did for the nibbles.
24705 // If the upper half of the input element is zero then add the halves'
24706 // leading zero counts together, otherwise just use the upper half's.
24707 // Double the width of the result until we are at target width.
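// For example, when combining two i8 counts into an i16 count: element
// 0x00f0 has a zero upper byte, so its count is 8 + ctlz8(0xf0) == 8 + 0 == 8,
// while 0x4000 has a non-zero upper byte and just uses ctlz8(0x40) == 1.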
24708 while (CurrVT != VT) {
24709 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
24710 int CurrNumElts = CurrVT.getVectorNumElements();
24711 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
24712 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
24713 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
24715 // Check if the upper half of the input element is zero.
24716 if (CurrVT.is512BitVector()) {
24717 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24718 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
24719 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24720 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
} else {
  HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
                     DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
}
24725 HiZ = DAG.getBitcast(NextVT, HiZ);
24727 // Move the upper/lower halves to the lower bits as we'll be extending to
24728 // NextVT. Mask the lower result to zero if HiZ is true and add the results
24730 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
24731 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
24732 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
24733 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
CurrVT = NextVT;
}

return Res;
}
24741 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
24742 const X86Subtarget &Subtarget,
24743 SelectionDAG &DAG) {
24744 MVT VT = Op.getSimpleValueType();
24746 if (Subtarget.hasCDI() &&
24747 // vXi8 vectors need to be promoted to 512-bits for vXi32.
24748 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
24749 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
24751 // Decompose 256-bit ops into smaller 128-bit ops.
24752 if (VT.is256BitVector() && !Subtarget.hasInt256())
24753 return Lower256IntUnary(Op, DAG);
24755 // Decompose 512-bit ops into smaller 256-bit ops.
24756 if (VT.is512BitVector() && !Subtarget.hasBWI())
24757 return Lower512IntUnary(Op, DAG);
24759 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
24760 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
24763 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
24764 SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
unsigned Opc = Op.getOpcode();

if (VT.isVector())
  return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
24774 Op = Op.getOperand(0);
if (VT == MVT::i8) {
  // Zero extend to i32 since there is not an i8 bsr.
  OpVT = MVT::i32;
  Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
}
24781 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
24782 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
24783 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
24785 if (Opc == ISD::CTLZ) {
24786 // If src is zero (i.e. bsr sets ZF), returns NumBits.
SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
                 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                 Op.getValue(1)};
Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
}
24793 // Finally xor with NumBits-1.
24794 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
24795 DAG.getConstant(NumBits - 1, dl, OpVT));
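// For example, for i32 x == 0x00f0, BSR returns 7 (the index of the highest
// set bit) and 7 ^ 31 == 24 == ctlz(x); for values in [0, NumBits-1], XOR
// with the all-ones value NumBits-1 equals subtraction from NumBits-1.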
if (VT == MVT::i8)
  Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
return Op;
}
24802 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
24803 SelectionDAG &DAG) {
24804 MVT VT = Op.getSimpleValueType();
24805 unsigned NumBits = VT.getScalarSizeInBits();
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
24809 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
24810 "Only scalar CTTZ requires custom lowering");
24812 // Issue a bsf (scan bits forward) which also sets EFLAGS.
24813 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
24814 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
24816 // If src is zero (i.e. bsf sets ZF), returns NumBits.
24817 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)};
24820 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
24823 /// Break a 256-bit integer operation into two new 128-bit ones and then
24824 /// concatenate the result back.
24825 static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
24826 MVT VT = Op.getSimpleValueType();
24828 assert(VT.is256BitVector() && VT.isInteger() &&
24829 "Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
24834 // Extract the LHS vectors
24835 SDValue LHS = Op.getOperand(0);
24836 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
24837 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
24839 // Extract the RHS vectors
24840 SDValue RHS = Op.getOperand(1);
24841 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
24842 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
24844 MVT EltVT = VT.getVectorElementType();
24845 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24847 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24848 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24849 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24852 /// Break a 512-bit integer operation into two new 256-bit ones and then
24853 /// concatenate the result back.
24854 static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
24855 MVT VT = Op.getSimpleValueType();
24857 assert(VT.is512BitVector() && VT.isInteger() &&
24858 "Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
24863 // Extract the LHS vectors
24864 SDValue LHS = Op.getOperand(0);
24865 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
24866 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
24868 // Extract the RHS vectors
24869 SDValue RHS = Op.getOperand(1);
24870 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
24871 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
24873 MVT EltVT = VT.getVectorElementType();
24874 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24876 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24877 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24878 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24881 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
24882 const X86Subtarget &Subtarget) {
24883 MVT VT = Op.getSimpleValueType();
24884 if (VT == MVT::i16 || VT == MVT::i32)
24885 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
24887 if (VT.getScalarType() == MVT::i1)
24888 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
24889 Op.getOperand(0), Op.getOperand(1));
24891 assert(Op.getSimpleValueType().is256BitVector() &&
24892 Op.getSimpleValueType().isInteger() &&
24893 "Only handle AVX 256-bit vector integer operation");
24894 return split256IntArith(Op, DAG);
24897 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
24898 const X86Subtarget &Subtarget) {
24899 MVT VT = Op.getSimpleValueType();
24900 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
24901 unsigned Opcode = Op.getOpcode();
if (VT.getScalarType() == MVT::i1) {
  SDLoc dl(Op);
  switch (Opcode) {
  default: llvm_unreachable("Expected saturated arithmetic opcode");
  case ISD::UADDSAT:
  case ISD::SADDSAT:
    // *addsat i1 X, Y --> X | Y
    return DAG.getNode(ISD::OR, dl, VT, X, Y);
  case ISD::USUBSAT:
  case ISD::SSUBSAT:
    // *subsat i1 X, Y --> X & ~Y
    return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
  }
}
24917 if (VT.is128BitVector()) {
24918 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
24919 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24920 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), VT);
SDLoc DL(Op);
24923 if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
24924 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
24925 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
24926 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
}
24929 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
24930 // usubsat X, Y --> (X >u Y) ? X - Y : 0
24931 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
24932 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
}
// Use default expansion.
return SDValue();
}
24939 assert(Op.getSimpleValueType().is256BitVector() &&
24940 Op.getSimpleValueType().isInteger() &&
24941 "Only handle AVX 256-bit vector integer operation");
24942 return split256IntArith(Op, DAG);
24945 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
24946 SelectionDAG &DAG) {
24947 MVT VT = Op.getSimpleValueType();
24948 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
24949 // Since X86 does not have CMOV for 8-bit integer, we don't convert
// 8-bit integer abs to NEG and CMOV.
SDLoc DL(Op);
SDValue N0 = Op.getOperand(0);
24953 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24954 DAG.getConstant(0, DL, VT), N0);
24955 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
24956 SDValue(Neg.getNode(), 1)};
return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
}
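// This computes abs(x) as (0 - x) >= 0 ? (0 - x) : x, with the SUB both
// producing the negation and setting EFLAGS for the CMOV.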
24960 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
24961 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
SDLoc DL(Op);
SDValue Src = Op.getOperand(0);
SDValue Sub =
    DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
24966 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
24969 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
24970 assert(VT.isInteger() &&
24971 "Only handle AVX 256-bit vector integer operation");
return Lower256IntUnary(Op, DAG);
}

// Default to expand.
return SDValue();
}
24979 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
24980 MVT VT = Op.getSimpleValueType();
24982 // For AVX1 cases, split to use legal ops (everything but v4i64).
24983 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
24984 return split256IntArith(Op, DAG);
SDLoc DL(Op);
unsigned Opcode = Op.getOpcode();
24988 SDValue N0 = Op.getOperand(0);
24989 SDValue N1 = Op.getOperand(1);
24991 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
24992 // using the SMIN/SMAX instructions and flipping the signbit back.
24993 if (VT == MVT::v8i16) {
24994 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
24995 "Unexpected MIN/MAX opcode");
24996 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
24997 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
24998 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
24999 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
25000 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
25001 return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
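// For example, umin(0x0001, 0xffff) flips to smin(0x8001 (-32767),
// 0x7fff (32767)) == 0x8001, which flips back to 0x0001: XOR with the sign
// bit is an order-preserving map between unsigned and signed comparison.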
// Else, expand to a compare/select.
ISD::CondCode CC;
switch (Opcode) {
25007 case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
25008 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
25009 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
25010 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
default: llvm_unreachable("Unknown MINMAX opcode");
}
25014 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
25015 return DAG.getSelect(DL, VT, Cond, N0, N1);
25018 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
25019 SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
25023 if (VT.getScalarType() == MVT::i1)
25024 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
25026 // Decompose 256-bit ops into 128-bit ops.
25027 if (VT.is256BitVector() && !Subtarget.hasInt256())
25028 return split256IntArith(Op, DAG);
25030 SDValue A = Op.getOperand(0);
25031 SDValue B = Op.getOperand(1);
25033 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
25034 // vector pairs, multiply and truncate.
25035 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
25036 unsigned NumElts = VT.getVectorNumElements();
25038 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
25039 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25040 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
25041 return DAG.getNode(
25042 ISD::TRUNCATE, dl, VT,
25043 DAG.getNode(ISD::MUL, dl, ExVT,
25044 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
}
25048 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25050 // Extract the lo/hi parts to any extend to i16.
// We're going to mask off the low byte of each result element of the
// pmullw, so it doesn't matter what's in the high byte of each 16-bit
// element.
25054 SDValue Undef = DAG.getUNDEF(VT);
25055 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
25056 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
SDValue BLo, BHi;
if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
  // If the RHS is a constant, manually unpackl/unpackh.
25061 SmallVector<SDValue, 16> LoOps, HiOps;
25062 for (unsigned i = 0; i != NumElts; i += 16) {
25063 for (unsigned j = 0; j != 8; ++j) {
LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
                                     MVT::i16));
HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
                                     MVT::i16));
}
}

25071 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25072 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
} else {
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
}
25078 // Multiply, mask the lower 8bits of the lo/hi results and pack.
25079 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25080 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25081 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
25082 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
25083 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25086 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
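// PMULUDQ multiplies the even i32 lanes into full i64 products, so for
// <a|b|c|d> * <e|f|g|h> the even multiply yields <a*e|c*g> and, once the odd
// lanes are moved into even positions, the odd multiply yields <b*f|d*h>;
// the low i32 of each i64 product is the desired i32 result.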
25087 if (VT == MVT::v4i32) {
25088 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
25089 "Should not custom lower when pmulld is available!");
25091 // Extract the odd parts.
25092 static const int UnpackMask[] = { 1, -1, 3, -1 };
25093 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
25094 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
25096 // Multiply the even parts.
25097 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25098 DAG.getBitcast(MVT::v2i64, A),
25099 DAG.getBitcast(MVT::v2i64, B));
25100 // Now multiply odd parts.
25101 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25102 DAG.getBitcast(MVT::v2i64, Aodds),
25103 DAG.getBitcast(MVT::v2i64, Bodds));
25105 Evens = DAG.getBitcast(VT, Evens);
25106 Odds = DAG.getBitcast(VT, Odds);
// Merge the two vectors back together with a shuffle. This expands into 2
// instructions.
25110 static const int ShufMask[] = { 0, 4, 2, 6 };
25111 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
25114 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
25115 "Only know how to lower V2I64/V4I64/V8I64 multiply");
25116 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
25118 // Ahi = psrlqi(a, 32);
25119 // Bhi = psrlqi(b, 32);
25121 // AloBlo = pmuludq(a, b);
25122 // AloBhi = pmuludq(a, Bhi);
25123 // AhiBlo = pmuludq(Ahi, b);
25125 // Hi = psllqi(AloBhi + AhiBlo, 32);
25126 // return AloBlo + Hi;
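// This follows from writing each operand as (lo + 2^32 * hi):
//   a * b == AloBlo + 2^32 * (AloBhi + AhiBlo) + 2^64 * AhiBhi
// where the last term vanishes modulo 2^64.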
25127 KnownBits AKnown = DAG.computeKnownBits(A);
25128 KnownBits BKnown = DAG.computeKnownBits(B);
25130 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
25131 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
25132 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
25134 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
25135 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
25136 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
25138 SDValue Zero = DAG.getConstant(0, dl, VT);
25140 // Only multiply lo/hi halves that aren't known to be zero.
25141 SDValue AloBlo = Zero;
25142 if (!ALoIsZero && !BLoIsZero)
25143 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
25145 SDValue AloBhi = Zero;
25146 if (!ALoIsZero && !BHiIsZero) {
25147 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
25148 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
25151 SDValue AhiBlo = Zero;
25152 if (!AHiIsZero && !BLoIsZero) {
25153 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
25154 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
25157 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
25158 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
25160 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
25163 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
25164 SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
25167 bool IsSigned = Op->getOpcode() == ISD::MULHS;
25168 unsigned NumElts = VT.getVectorNumElements();
25169 SDValue A = Op.getOperand(0);
25170 SDValue B = Op.getOperand(1);
25172 // Decompose 256-bit ops into 128-bit ops.
25173 if (VT.is256BitVector() && !Subtarget.hasInt256())
25174 return split256IntArith(Op, DAG);
25176 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
25177 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
25178 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
25179 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
25181 // PMULxD operations multiply each even value (starting at 0) of LHS with
// the related value of RHS and produce a widened result.
25183 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
25184 // => <2 x i64> <ae|cg>
// In other words, to have all the results, we need to perform two PMULxD:
25187 // 1. one with the even values.
25188 // 2. one with the odd values.
// To achieve #2, we need to place the odd values at an even position.
25191 // Place the odd value at an even position (basically, shift all values 1
25192 // step to the left):
25193 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
25194 9, -1, 11, -1, 13, -1, 15, -1};
25195 // <a|b|c|d> => <b|undef|d|undef>
25196 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
25197 makeArrayRef(&Mask[0], NumElts));
25198 // <e|f|g|h> => <f|undef|h|undef>
25199 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
25200 makeArrayRef(&Mask[0], NumElts));
25202 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
25204 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
25206 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
25207 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
25208 // => <2 x i64> <ae|cg>
25209 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
25210 DAG.getBitcast(MulVT, A),
25211 DAG.getBitcast(MulVT, B)));
25212 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
25213 // => <2 x i64> <bf|dh>
25214 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
25215 DAG.getBitcast(MulVT, Odd0),
25216 DAG.getBitcast(MulVT, Odd1)));
25218 // Shuffle it back into the right order.
25219 SmallVector<int, 16> ShufMask(NumElts);
25220 for (int i = 0; i != (int)NumElts; ++i)
25221 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
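// For NumElts == 4 this produces the mask <1, 5, 3, 7>, i.e. the odd (high)
// i32 halves of the two v2i64 products: <hi(ae), hi(bf), hi(cg), hi(dh)>.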
25223 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
25225 // If we have a signed multiply but no PMULDQ fix up the result of an
25226 // unsigned multiply.
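// mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0): treating a
// negative a as unsigned adds 2^32 * b to the full product, i.e. b to the
// high half (and likewise for b), which the subtraction below undoes.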
25227 if (IsSigned && !Subtarget.hasSSE41()) {
25228 SDValue Zero = DAG.getConstant(0, dl, VT);
25229 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
25230 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
25231 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
25232 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
25234 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
}

return Res;
}
25241 // Only i8 vectors should need custom lowering after this.
25242 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
25243 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
25244 "Unsupported vector type");
25246 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
25247 // logical shift down the upper half and pack back to i8.
25249 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
25250 // and then ashr/lshr the upper bits down to the lower bits before multiply.
25251 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
25253 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
25254 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25255 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
25256 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
25257 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
25258 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
25259 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
25260 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
25263 // For signed 512-bit vectors, split into 256-bit vectors to allow the
25264 // sign-extension to occur.
25265 if (VT == MVT::v64i8 && IsSigned)
25266 return split512IntArith(Op, DAG);
25268 // Signed AVX2 implementation - extend xmm subvectors to ymm.
25269 if (VT == MVT::v32i8 && IsSigned) {
25270 MVT ExVT = MVT::v16i16;
25271 SDValue ALo = extract128BitVector(A, 0, DAG, dl);
25272 SDValue BLo = extract128BitVector(B, 0, DAG, dl);
25273 SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
25274 SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
25275 ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
25276 BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
25277 AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
25278 BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
25279 SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25280 SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25281 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
25282 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
25284 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
25285 // Shuffle lowering should turn this into PACKUS+PERMQ
25286 Lo = DAG.getBitcast(VT, Lo);
25287 Hi = DAG.getBitcast(VT, Hi);
25288 return DAG.getVectorShuffle(VT, dl, Lo, Hi,
25289 { 0, 2, 4, 6, 8, 10, 12, 14,
25290 16, 18, 20, 22, 24, 26, 28, 30,
25291 32, 34, 36, 38, 40, 42, 44, 46,
25292 48, 50, 52, 54, 56, 58, 60, 62});
25295 // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
25296 // half of each 128 bit lane to widen to a vXi16 type. Do the multiplies,
25297 // shift the results and pack the half lane results back together.
25299 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25301 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
25302 -1, -1, -1, -1, -1, -1, -1, -1};
25304 // Extract the lo parts and zero/sign extend to i16.
25305 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
25306 // shifts to sign extend. Using unpack for unsigned only requires an xor to
// create zeros and a copy due to tied register constraints pre-AVX. But using
// zero_extend_vector_inreg would require an additional pshufd for the high
// part.

SDValue ALo, AHi;
25312 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
25313 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
25315 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
25316 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
25317 } else if (IsSigned) {
25318 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
25319 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
25321 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
25322 AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
} else {
ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
25325 DAG.getConstant(0, dl, VT)));
AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
                                      DAG.getConstant(0, dl, VT)));
}

SDValue BLo, BHi;
25331 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
// If the RHS is a constant, manually unpackl/unpackh and extend.
25333 SmallVector<SDValue, 16> LoOps, HiOps;
25334 for (unsigned i = 0; i != NumElts; i += 16) {
25335 for (unsigned j = 0; j != 8; ++j) {
25336 SDValue LoOp = B.getOperand(i + j);
25337 SDValue HiOp = B.getOperand(i + j + 8);
if (IsSigned) {
  LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
  HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
} else {
  LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
  HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
}
25347 LoOps.push_back(LoOp);
HiOps.push_back(HiOp);
}
}

25352 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25353 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
25354 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
25355 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
25357 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
25358 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
25359 } else if (IsSigned) {
25360 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
25361 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
25363 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
25364 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
} else {
BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
25367 DAG.getConstant(0, dl, VT)));
BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
                                      DAG.getConstant(0, dl, VT)));
}
25372 // Multiply, lshr the upper 8bits to the lower 8bits of the lo/hi results and
25373 // pack back to vXi8.
25374 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25375 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25376 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
25377 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
25379 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
25380 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25383 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
25384 assert(Subtarget.isTargetWin64() && "Unexpected target");
25385 EVT VT = Op.getValueType();
25386 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
25387 "Unexpected return type for lowering");
25391 switch (Op->getOpcode()) {
25392 default: llvm_unreachable("Unexpected request for libcall!");
25393 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
25394 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
25395 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
25396 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
25397 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
25398 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
}

SDLoc dl(Op);
SDValue InChain = DAG.getEntryNode();
25404 TargetLowering::ArgListTy Args;
25405 TargetLowering::ArgListEntry Entry;
25406 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
25407 EVT ArgVT = Op->getOperand(i).getValueType();
25408 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
25409 "Unexpected argument type for lowering");
25410 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
25411 Entry.Node = StackPtr;
25412 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
25413 MachinePointerInfo(), /* Alignment = */ 16);
25414 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
25415 Entry.Ty = PointerType::get(ArgTy,0);
25416 Entry.IsSExt = false;
25417 Entry.IsZExt = false;
25418 Args.push_back(Entry);
25421 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
25422 getPointerTy(DAG.getDataLayout()));
25424 TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
    .setChain(InChain)
    .setLibCallee(
        getLibcallCallingConv(LC),
        static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
        std::move(Args))
    .setInRegister()
25432 .setSExtResult(isSigned)
25433 .setZExtResult(!isSigned);
25435 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
25436 return DAG.getBitcast(VT, CallInfo.first);
25439 // Return true if the required (according to Opcode) shift-imm form is natively
25440 // supported by the Subtarget
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
                                        unsigned Opcode) {
  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() && Subtarget.hasAVX512() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
    return true;
25450 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
25451 (VT.is256BitVector() && Subtarget.hasInt256());
25453 bool AShift = LShift && (Subtarget.hasAVX512() ||
25454 (VT != MVT::v2i64 && VT != MVT::v4i64));
25455 return (Opcode == ISD::SRA) ? AShift : LShift;
25458 // The shift amount is a variable, but it is the same for all vector lanes.
25459 // These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
                                      unsigned Opcode) {
  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
25466 // Return true if the required (according to Opcode) variable-shift form is
25467 // natively supported by the Subtarget
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
                                    unsigned Opcode) {
  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 supported only on AVX-512, BWI
  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
    return false;

  if (Subtarget.hasAVX512())
    return true;
25481 bool LShift = VT.is128BitVector() || VT.is256BitVector();
25482 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
25483 return (Opcode == ISD::SRA) ? AShift : LShift;
25486 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
25487 const X86Subtarget &Subtarget) {
25488 MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
25491 SDValue Amt = Op.getOperand(1);
25492 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
25494 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
25495 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
25496 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
25497 SDValue Ex = DAG.getBitcast(ExVT, R);
25499 // ashr(R, 63) === cmp_slt(R, 0)
25500 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
25501 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
25502 "Unsupported PCMPGT op");
25503 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
25506 if (ShiftAmt >= 32) {
25507 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
SDValue Upper =
    getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
25510 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
25511 ShiftAmt - 32, DAG);
25512 if (VT == MVT::v2i64)
25513 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
25514 if (VT == MVT::v4i64)
25515 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25516 {9, 1, 11, 3, 13, 5, 15, 7});
} else {
// SRA upper i32, SRL whole i64 and select lower i32.
SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                           ShiftAmt, DAG);
SDValue Lower =
25522 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
25523 Lower = DAG.getBitcast(ExVT, Lower);
25524 if (VT == MVT::v2i64)
25525 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
25526 if (VT == MVT::v4i64)
25527 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25528 {8, 1, 10, 3, 12, 5, 14, 7});
}
return DAG.getBitcast(VT, Ex);
};
25533 // Optimize shl/srl/sra with constant shift amount.
25534 APInt APIntShiftAmt;
if (!X86::isConstantSplat(Amt, APIntShiftAmt))
  return SDValue();
25538 // If the shift amount is out of range, return undef.
25539 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
25540 return DAG.getUNDEF(VT);
25542 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
25544 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
25545 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
25547 // i64 SRA needs to be performed as partial shifts.
25548 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
25549 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
25550 Op.getOpcode() == ISD::SRA)
25551 return ArithmeticShiftRight64(ShiftAmt);
25553 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
25554 VT == MVT::v64i8) {
25555 unsigned NumElts = VT.getVectorNumElements();
25556 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25558 // Simple i8 add case
25559 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
25560 return DAG.getNode(ISD::ADD, dl, VT, R, R);
25562 // ashr(R, 7) === cmp_slt(R, 0)
25563 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
25564 SDValue Zeros = DAG.getConstant(0, dl, VT);
25565 if (VT.is512BitVector()) {
25566 assert(VT == MVT::v64i8 && "Unexpected element type!");
25567 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
25568 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
25570 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
25573 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
if (VT == MVT::v16i8 && Subtarget.hasXOP())
  return SDValue();
25577 if (Op.getOpcode() == ISD::SHL) {
25578 // Make a large shift.
SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
                                         ShiftAmt, DAG);
25581 SHL = DAG.getBitcast(VT, SHL);
25582 // Zero out the rightmost bits.
25583 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
25584 return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
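// For example, a v16i8 shl by 5 is performed as a v8i16 shl by 5 followed by
// an AND with 0xe0 in each byte, clearing the bits that were shifted in from
// the neighbouring byte.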
25586 if (Op.getOpcode() == ISD::SRL) {
25587 // Make a large shift.
SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
                                         ShiftAmt, DAG);
25590 SRL = DAG.getBitcast(VT, SRL);
25591 // Zero out the leftmost bits.
25592 return DAG.getNode(ISD::AND, dl, VT, SRL,
25593 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
25595 if (Op.getOpcode() == ISD::SRA) {
25596 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
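// For example, with ShiftAmt == 1 and R == 0x80 (-128): lshr gives 0x40 and
// Mask == 0x40, so the xor gives 0x00 and the sub yields 0xc0 == -64; the
// xor/sub pair sign-extends from the new sign-bit position (7 - ShiftAmt).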
25597 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25599 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
25600 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
}
llvm_unreachable("Unknown shift opcode.");
}

return SDValue();
}
25610 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
25611 const X86Subtarget &Subtarget) {
25612 MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
25615 SDValue Amt = Op.getOperand(1);
25616 unsigned Opcode = Op.getOpcode();
25617 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
25618 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
25620 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
25621 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
25622 MVT EltVT = VT.getVectorElementType();
25623 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
25624 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
25625 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
25626 else if (EltVT.bitsLT(MVT::i32))
25627 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25629 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
25632 // vXi8 shifts - shift as v8i16 + mask result.
25633 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
25634 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
25635 VT == MVT::v64i8) &&
25636 !Subtarget.hasXOP()) {
25637 unsigned NumElts = VT.getVectorNumElements();
25638 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25639 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
25640 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
25641 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
25642 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25644 // Create the mask using vXi16 shifts. For shift-rights we need to move
25645 // the upper byte down before splatting the vXi8 mask.
25646 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
25647 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
25648 BaseShAmt, Subtarget, DAG);
25649 if (Opcode != ISD::SHL)
BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
                                     8, DAG);
25652 BitMask = DAG.getBitcast(VT, BitMask);
25653 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
25654 SmallVector<int, 64>(NumElts, 0));
25656 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
DAG.getBitcast(ExtVT, R), BaseShAmt,
Subtarget, DAG);
25659 Res = DAG.getBitcast(VT, Res);
25660 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
25662 if (Opcode == ISD::SRA) {
25663 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
25664 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
25665 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
25666 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
25667 BaseShAmt, Subtarget, DAG);
25668 SignMask = DAG.getBitcast(VT, SignMask);
25669 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
}

return Res;
}
}

25677 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
25678 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
25679 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
25680 Amt = Amt.getOperand(0);
25681 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
25682 std::vector<SDValue> Vals(Ratio);
25683 for (unsigned i = 0; i != Ratio; ++i)
25684 Vals[i] = Amt.getOperand(i);
25685 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
25686 for (unsigned j = 0; j != Ratio; ++j)
if (Vals[j] != Amt.getOperand(i + j))
  return SDValue();
}
25691 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
}

return SDValue();
}
25697 // Convert a shift/rotate left amount to a multiplication scale factor.
25698 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
25699 const X86Subtarget &Subtarget,
25700 SelectionDAG &DAG) {
25701 MVT VT = Amt.getSimpleValueType();
25702 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
25703 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
(!Subtarget.hasAVX512() && VT == MVT::v16i8)))
  return SDValue();
25707 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
25708 SmallVector<SDValue, 8> Elts;
25709 MVT SVT = VT.getVectorElementType();
25710 unsigned SVTBits = SVT.getSizeInBits();
25711 APInt One(SVTBits, 1);
25712 unsigned NumElems = VT.getVectorNumElements();
25714 for (unsigned i = 0; i != NumElems; ++i) {
25715 SDValue Op = Amt->getOperand(i);
25716 if (Op->isUndef()) {
Elts.push_back(Op);
continue;
}
25721 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
25722 APInt C(SVTBits, ND->getZExtValue());
25723 uint64_t ShAmt = C.getZExtValue();
25724 if (ShAmt >= SVTBits) {
Elts.push_back(DAG.getUNDEF(SVT));
continue;
}
25728 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
return DAG.getBuildVector(VT, dl, Elts);
}
25733 // If the target doesn't support variable shifts, use either FP conversion
25734 // or integer multiplication to avoid shifting each element individually.
25735 if (VT == MVT::v4i32) {
25736 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
25737 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
25738 DAG.getConstant(0x3f800000U, dl, VT));
25739 Amt = DAG.getBitcast(MVT::v4f32, Amt);
return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
}
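// The add builds IEEE single-precision values in place: (Amt << 23) added to
// 0x3f800000 (1.0f) gives a float with exponent 127 + Amt, i.e. the value
// 2^Amt, so e.g. Amt == 5 becomes 32.0f and FP_TO_SINT returns the scale 32.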
25743 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
25744 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
25745 SDValue Z = DAG.getConstant(0, dl, VT);
25746 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
25747 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
25748 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
25749 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
25750 if (Subtarget.hasSSE41())
25751 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
                            DAG.getBitcast(VT, Hi),
                            {0, 2, 4, 6, 8, 10, 12, 14});
}

return SDValue();
}
25761 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
25762 SelectionDAG &DAG) {
25763 MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
25766 SDValue Amt = Op.getOperand(1);
25767 unsigned EltSizeInBits = VT.getScalarSizeInBits();
25768 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25770 unsigned Opc = Op.getOpcode();
25771 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
25772 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
25774 assert(VT.isVector() && "Custom lowering only for vector shifts!");
25775 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
  return V;

if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
  return V;

if (SupportedVectorVarShift(VT, Subtarget, Opc))
  return Op;
25786 // XOP has 128-bit variable logical/arithmetic shifts.
25787 // +ve/-ve Amt = shift left/right.
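// For example, a srl by 3 becomes VPSHL by -3 after the negation below.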
25788 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
25789 VT == MVT::v8i16 || VT == MVT::v16i8)) {
25790 if (Opc == ISD::SRL || Opc == ISD::SRA) {
25791 SDValue Zero = DAG.getConstant(0, dl, VT);
25792 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
25794 if (Opc == ISD::SHL || Opc == ISD::SRL)
25795 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
25796 if (Opc == ISD::SRA)
25797 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
25800 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
25801 // shifts per-lane and then shuffle the partial results back together.
25802 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
25803 // Splat the shift amounts so the scalar shifts above will catch it.
25804 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
25805 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
25806 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
25807 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
25808 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
25811 // i64 vector arithmetic shift can be emulated with the transform:
25812 // M = lshr(SIGN_MASK, Amt)
25813 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
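// For example (shown in 8 bits for brevity), R == 0xfe (-2), Amt == 1:
// M == 0x40, lshr(R) == 0x7f, the xor gives 0x3f and the sub yields
// 0xff == -1 == ashr(-2, 1).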
if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
    Opc == ISD::SRA) {
25816 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
25817 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
25818 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25819 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
R = DAG.getNode(ISD::SUB, dl, VT, R, M);
return R;
}
25824 // If possible, lower this shift as a sequence of two shifts by
25825 // constant plus a BLENDing shuffle instead of scalarizing it.
25827 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
25829 // Could be rewritten as:
25830 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
25832 // The advantage is that the two shifts from the example would be
25833 // lowered as X86ISD::VSRLI nodes in parallel before blending.
25834 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
25835 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25836 SDValue Amt1, Amt2;
25837 unsigned NumElts = VT.getVectorNumElements();
25838 SmallVector<int, 8> ShuffleMask;
25839 for (unsigned i = 0; i != NumElts; ++i) {
SDValue A = Amt->getOperand(i);
if (A.isUndef()) {
  ShuffleMask.push_back(SM_SentinelUndef);
  continue;
}
if (!Amt1 || Amt1 == A) {
  ShuffleMask.push_back(i);
  Amt1 = A;
  continue;
}
if (!Amt2 || Amt2 == A) {
  ShuffleMask.push_back(i + NumElts);
  Amt2 = A;
  continue;
}
break;
}
25858 // Only perform this blend if we can perform it without loading a mask.
25859 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
25860 (VT != MVT::v16i16 ||
25861 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
25862 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
25863 canWidenShuffleElements(ShuffleMask))) {
25864 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
25865 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
25866 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
25867 Cst2->getAPIntValue().ult(EltSizeInBits)) {
25868 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25869 Cst1->getZExtValue(), DAG);
25870 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25871 Cst2->getZExtValue(), DAG);
25872 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
25877 // If possible, lower this packed shift into a vector multiply instead of
25878 // expanding it into a sequence of scalar shifts.
25879 if (Opc == ISD::SHL)
25880 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
25881 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
25883 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
25884 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
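// For example, a srl by 3 becomes mulhu with scale 2^(16-3) == 0x2000, since
// (R * 0x2000) >> 16 == R >> 3. Amt == 0 would need scale 2^16, which wraps
// to 0 in i16, hence the select of R for zero shift amounts below.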
25885 if (Opc == ISD::SRL && ConstantAmt &&
25886 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25887 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25888 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25889 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25890 SDValue Zero = DAG.getConstant(0, dl, VT);
25891 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
25892 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
return DAG.getSelect(dl, VT, ZAmt, R, Res);
}
}
25897 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
25898 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
25899 // TODO: Special case handling for shift by 0/1, really we can afford either
25900 // of these cases in pre-SSE41/XOP/AVX512 but not both.
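// For example, a sra by 3 becomes mulhs with scale 2^(16-3) == 0x2000, since
// mulhs(R, 0x2000) == floor(R / 8) even for negative R. Amt == 0 (scale
// wraps to 0) and Amt == 1 (scale 2^15 is negative in i16) are handled by
// the two selects below.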
25901 if (Opc == ISD::SRA && ConstantAmt &&
25902 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
25903 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
25904 !Subtarget.hasAVX512()) ||
25905 DAG.isKnownNeverZero(Amt))) {
25906 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25907 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25908 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
SDValue Amt0 =
    DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
SDValue Amt1 =
    DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
SDValue Sra1 =
    getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
25915 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
25916 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
}
}
25921 // v4i32 Non Uniform Shifts.
25922 // If the shift amount is constant we can shift each lane using the SSE2
25923 // immediate shifts, else we need to zero-extend each lane to the lower i64
25924 // and shift using the SSE2 variable shifts.
25925 // The separate results can then be blended together.
25926 if (VT == MVT::v4i32) {
25927 SDValue Amt0, Amt1, Amt2, Amt3;
if (ConstantAmt) {
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
25930 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
25931 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
25932 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
} else {
// The SSE2 shifts use the lower i64 as the same shift amount for
25935 // all lanes and the upper i64 is ignored. On AVX we're better off
25936 // just zero-extending, but for SSE just duplicating the top 16-bits is
25937 // cheaper and has the same effect for out of range values.
25938 if (Subtarget.hasAVX()) {
25939 SDValue Z = DAG.getConstant(0, dl, VT);
25940 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
25941 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
25942 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
25943 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
} else {
SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
25946 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25947 {4, 5, 6, 7, -1, -1, -1, -1});
25948 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25949 {0, 1, 1, 1, -1, -1, -1, -1});
25950 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25951 {2, 3, 3, 3, -1, -1, -1, -1});
25952 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25953 {0, 1, 1, 1, -1, -1, -1, -1});
25954 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25955 {2, 3, 3, 3, -1, -1, -1, -1});
}
}

unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
25960 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
25961 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
25962 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
25963 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
25965 // Merge the shifted lane results optimally with/without PBLENDW.
25966 // TODO - ideally shuffle combining would handle this.
25967 if (Subtarget.hasSSE41()) {
25968 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
25969 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
25970 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
}
SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
25973 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
25974 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
25977 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
25978 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
25979 // make the existing SSE solution better.
// NOTE: We honor preferred vector width before promoting to 512-bits.
25981 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
25982 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
25983 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
25984 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
25985 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
25986 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
25987 "Unexpected vector type");
25988 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
25989 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
25990 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
25991 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
25992 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
25993 return DAG.getNode(ISD::TRUNCATE, dl, VT,
25994 DAG.getNode(Opc, dl, ExtVT, R, Amt));
25997 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
25998 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
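// For example, a srl by 3 uses scale (1 << (8 - 3)) == 32, since
// (zext(R) * 32) >> 8 == R >> 3 in each i16 lane.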
25999 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
26000 (VT == MVT::v16i8 || VT == MVT::v64i8 ||
26001 (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
26002 !Subtarget.hasXOP()) {
26003 int NumElts = VT.getVectorNumElements();
26004 SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
// Extend constant shift amount to vXi16 (it doesn't matter if the type
// isn't legal).
26008 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26009 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
26010 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
26011 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
26012 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
26013 "Constant build vector expected");
26015 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
26016 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
26017 : DAG.getZExtOrTrunc(R, dl, ExVT);
26018 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
26019 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
26020 return DAG.getZExtOrTrunc(R, dl, VT);
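// Worked example of the scale trick above (illustrative, for a uniform
// amount): r >> 3 on vXi8 sets Amt to 1 << (8 - 3) = 32, so each lane
// computes (ext16(r) * 32) >> 8 == (r << 5) >> 8 == r >> 3, with the
// multiply acting as the high half of a MUL_LOHI; SRA works the same way
// via the sign extension above.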
26023 SmallVector<SDValue, 16> LoAmt, HiAmt;
26024 for (int i = 0; i != NumElts; i += 16) {
26025 for (int j = 0; j != 8; ++j) {
26026 LoAmt.push_back(Amt.getOperand(i + j));
26027 HiAmt.push_back(Amt.getOperand(i + j + 8));
26031 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
26032 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
26033 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
26035 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
26036 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
26037 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
26038 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
26039 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
26040 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
26041 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
26042 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
26043 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
26046 if (VT == MVT::v16i8 ||
26047 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
26048 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
26049 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
26051 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26052 if (VT.is512BitVector()) {
26053 // On AVX512BW targets we make use of the fact that VSELECT lowers
26054 // to a masked blend which selects bytes based just on the sign bit
26055 // extracted to a mask.
26056 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26057 V0 = DAG.getBitcast(VT, V0);
26058 V1 = DAG.getBitcast(VT, V1);
26059 Sel = DAG.getBitcast(VT, Sel);
26060 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
26061 ISD::SETGT);
26062 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26063 } else if (Subtarget.hasSSE41()) {
26064 // On SSE41 targets we make use of the fact that VSELECT lowers
26065 // to PBLENDVB which selects bytes based just on the sign bit.
26066 V0 = DAG.getBitcast(VT, V0);
26067 V1 = DAG.getBitcast(VT, V1);
26068 Sel = DAG.getBitcast(VT, Sel);
26069 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26071 // On pre-SSE41 targets we test for the sign bit by comparing to
26072 // zero - a negative value will set all bits of the lanes to true
26073 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26074 SDValue Z = DAG.getConstant(0, dl, SelVT);
26075 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
26076 return DAG.getSelect(dl, SelVT, C, V0, V1);
26079 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26080 // We can safely do this using i16 shifts as we're only interested in
26081 // the 3 lower bits of each byte.
26082 Amt = DAG.getBitcast(ExtVT, Amt);
26083 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
26084 Amt = DAG.getBitcast(VT, Amt);
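// Illustrative note: a byte amount has the form 0b00000{b2}{b1}{b0}, so
// after the << 5 above, b2 sits in the byte's sign bit and selects the
// shift-by-4 result; each subsequent "Amt + Amt" below moves b1 and then
// b0 into the sign bit for the shift-by-2 and shift-by-1 stages.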
26086 if (Opc == ISD::SHL || Opc == ISD::SRL) {
26087 // r = VSELECT(r, shift(r, 4), a);
26088 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
26089 R = SignBitSelect(VT, Amt, M, R);
26092 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26094 // r = VSELECT(r, shift(r, 2), a);
26095 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
26096 R = SignBitSelect(VT, Amt, M, R);
26099 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26101 // return VSELECT(r, shift(r, 1), a);
26102 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
26103 R = SignBitSelect(VT, Amt, M, R);
26104 return R;
26105 }
26107 if (Opc == ISD::SRA) {
26108 // For SRA we need to unpack each byte to the higher byte of an i16 vector
26109 // so we can correctly sign extend. We don't care what happens to the
26110 // lower byte.
26111 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26112 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26113 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
26114 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
26115 ALo = DAG.getBitcast(ExtVT, ALo);
26116 AHi = DAG.getBitcast(ExtVT, AHi);
26117 RLo = DAG.getBitcast(ExtVT, RLo);
26118 RHi = DAG.getBitcast(ExtVT, RHi);
26120 // r = VSELECT(r, shift(r, 4), a);
26121 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
26122 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
26123 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26124 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26127 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26128 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26130 // r = VSELECT(r, shift(r, 2), a);
26131 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
26132 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
26133 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26134 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26137 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26138 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26140 // r = VSELECT(r, shift(r, 1), a);
26141 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
26142 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
26143 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26144 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26146 // Logical shift the result back to the lower byte, leaving a zero upper
26147 // byte meaning that we can safely pack with PACKUSWB.
26148 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
26149 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
26150 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26154 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
26155 MVT ExtVT = MVT::v8i32;
26156 SDValue Z = DAG.getConstant(0, dl, VT);
26157 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
26158 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
26159 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
26160 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
26161 ALo = DAG.getBitcast(ExtVT, ALo);
26162 AHi = DAG.getBitcast(ExtVT, AHi);
26163 RLo = DAG.getBitcast(ExtVT, RLo);
26164 RHi = DAG.getBitcast(ExtVT, RHi);
26165 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
26166 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
26167 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
26168 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
26169 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
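// Worked example for SRL: unpacking places each 16-bit lane r in the high
// half of an i32 lane, so the variable i32 shift computes (r << 16) >> a,
// and the final >> 16 leaves r >> a in the low half with zeroed upper bits,
// which is exactly what PACKUS needs to recombine losslessly; SHL and SRA
// fall out the same way modulo 2^16.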
26172 if (VT == MVT::v8i16) {
26173 // If we have a constant shift amount, the non-SSE41 path is best as
26174 // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
26175 bool UseSSE41 = Subtarget.hasSSE41() &&
26176 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26178 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
26179 // On SSE41 targets we make use of the fact that VSELECT lowers
26180 // to PBLENDVB which selects bytes based just on the sign bit.
26181 if (UseSSE41) {
26182 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
26183 V0 = DAG.getBitcast(ExtVT, V0);
26184 V1 = DAG.getBitcast(ExtVT, V1);
26185 Sel = DAG.getBitcast(ExtVT, Sel);
26186 return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
26187 }
26188 // On pre-SSE41 targets we splat the sign bit - a negative value will
26189 // set all bits of the lanes to true and VSELECT uses that in
26190 // its OR(AND(V0,C),AND(V1,~C)) lowering.
26191 SDValue C =
26192 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
26193 return DAG.getSelect(dl, VT, C, V0, V1);
26196 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
26198 // On SSE41 targets we need to replicate the shift mask in both
26199 // bytes for PBLENDVB.
26200 if (UseSSE41) {
26201 Amt = DAG.getNode(ISD::OR, dl, VT,
26202 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
26203 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
26204 } else {
26205 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
26206 }
26208 // r = VSELECT(r, shift(r, 8), a);
26209 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
26210 R = SignBitSelect(Amt, M, R);
26213 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26215 // r = VSELECT(r, shift(r, 4), a);
26216 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
26217 R = SignBitSelect(Amt, M, R);
26220 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26222 // r = VSELECT(r, shift(r, 2), a);
26223 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
26224 R = SignBitSelect(Amt, M, R);
26227 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26229 // return VSELECT(r, shift(r, 1), a);
26230 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
26231 R = SignBitSelect(Amt, M, R);
26232 return R;
26233 }
26235 // Decompose 256-bit shifts into 128-bit shifts.
26236 if (VT.is256BitVector())
26237 return split256IntArith(Op, DAG);
26239 return SDValue();
26240 }
26242 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
26243 SelectionDAG &DAG) {
26244 MVT VT = Op.getSimpleValueType();
26245 assert(VT.isVector() && "Custom lowering only for vector rotates!");
26247 SDLoc DL(Op);
26248 SDValue R = Op.getOperand(0);
26249 SDValue Amt = Op.getOperand(1);
26250 unsigned Opcode = Op.getOpcode();
26251 unsigned EltSizeInBits = VT.getScalarSizeInBits();
26252 int NumElts = VT.getVectorNumElements();
26254 // Check for constant splat rotation amount.
26255 APInt UndefElts;
26256 SmallVector<APInt, 32> EltBits;
26257 int CstSplatIndex = -1;
26258 if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
26259 for (int i = 0; i != NumElts; ++i)
26260 if (!UndefElts[i]) {
26261 if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
26262 CstSplatIndex = i;
26263 continue;
26264 }
26265 CstSplatIndex = -1;
26266 break;
26267 }
26269 // AVX512 implicitly uses modulo rotation amounts.
26270 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
26271 // Attempt to rotate by immediate.
26272 if (0 <= CstSplatIndex) {
26273 unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
26274 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
26275 return DAG.getNode(Op, DL, VT, R,
26276 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
26279 // Else, fall back on VPROLV/VPRORV.
26280 return Op;
26281 }
26283 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
26285 // XOP has 128-bit vector variable + immediate rotates.
26286 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
26287 // XOP implicitly uses modulo rotation amounts.
26288 if (Subtarget.hasXOP()) {
26289 if (VT.is256BitVector())
26290 return split256IntArith(Op, DAG);
26291 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
26293 // Attempt to rotate by immediate.
26294 if (0 <= CstSplatIndex) {
26295 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
26296 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
26297 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
26300 // Use general rotate by variable (per-element).
26301 return Op;
26302 }
26304 // Split 256-bit integers on pre-AVX2 targets.
26305 if (VT.is256BitVector() && !Subtarget.hasAVX2())
26306 return split256IntArith(Op, DAG);
26308 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
26309 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
26310 Subtarget.hasAVX2())) &&
26311 "Only vXi32/vXi16/vXi8 vector rotates supported");
26313 // Rotate by a uniform constant - expand back to shifts.
26314 if (0 <= CstSplatIndex)
26315 return SDValue();
26317 bool IsSplatAmt = DAG.isSplatValue(Amt);
26319 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
26320 // the amount bit.
26321 if (EltSizeInBits == 8 && !IsSplatAmt) {
26322 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
26325 // We don't need ModuloAmt here as we just peek at individual bits.
26326 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26328 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26329 if (Subtarget.hasSSE41()) {
26330 // On SSE41 targets we make use of the fact that VSELECT lowers
26331 // to PBLENDVB which selects bytes based just on the sign bit.
26332 V0 = DAG.getBitcast(VT, V0);
26333 V1 = DAG.getBitcast(VT, V1);
26334 Sel = DAG.getBitcast(VT, Sel);
26335 return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
26337 // On pre-SSE41 targets we test for the sign bit by comparing to
26338 // zero - a negative value will set all bits of the lanes to true
26339 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26340 SDValue Z = DAG.getConstant(0, DL, SelVT);
26341 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
26342 return DAG.getSelect(DL, SelVT, C, V0, V1);
26345 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26346 // We can safely do this using i16 shifts as we're only interested in
26347 // the 3 lower bits of each byte.
26348 Amt = DAG.getBitcast(ExtVT, Amt);
26349 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
26350 Amt = DAG.getBitcast(VT, Amt);
26352 // r = VSELECT(r, rot(r, 4), a);
26353 SDValue M;
26354 M = DAG.getNode(
26355 ISD::OR, DL, VT,
26356 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
26357 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
26358 R = SignBitSelect(VT, Amt, M, R);
26361 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
26363 // r = VSELECT(r, rot(r, 2), a);
26364 M = DAG.getNode(
26365 ISD::OR, DL, VT,
26366 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
26367 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
26368 R = SignBitSelect(VT, Amt, M, R);
26371 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
26373 // return VSELECT(r, rot(r, 1), a);
26374 M = DAG.getNode(
26375 ISD::OR, DL, VT,
26376 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
26377 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
26378 return SignBitSelect(VT, Amt, M, R);
26381 // ISD::ROT* uses modulo rotate amounts.
26382 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
26383 DAG.getConstant(EltSizeInBits - 1, DL, VT));
26385 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26386 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
26387 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
26389 // Fallback for splats + all supported variable shifts.
26390 // Fallback for non-constant AVX2 vXi16 as well.
26391 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
26392 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
26393 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
26394 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
26395 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
26396 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
26399 // As with shifts, convert the rotation amount to a multiplication factor.
26400 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
26401 assert(Scale && "Failed to convert ROTL amount to scale");
26403 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
26404 if (EltSizeInBits == 16) {
26405 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
26406 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
26407 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
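// Worked example per i16 lane: for rotl(r, c), Lo = (r * 2^c) mod 2^16
// holds the left-shifted bits and Hi = (r * 2^c) >> 16 holds the bits that
// wrapped around, so Lo | Hi reassembles the rotation.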
26410 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
26411 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
26412 // that can then be OR'd with the lower 32-bits.
26413 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
26414 static const int OddMask[] = {1, -1, 3, -1};
26415 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
26416 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
26418 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
26419 DAG.getBitcast(MVT::v2i64, R),
26420 DAG.getBitcast(MVT::v2i64, Scale));
26421 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
26422 DAG.getBitcast(MVT::v2i64, R13),
26423 DAG.getBitcast(MVT::v2i64, Scale13));
26424 Res02 = DAG.getBitcast(VT, Res02);
26425 Res13 = DAG.getBitcast(VT, Res13);
26427 return DAG.getNode(ISD::OR, DL, VT,
26428 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
26429 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
26432 /// Returns true if the operand type is exactly twice the native width, and
26433 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
26434 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
26435 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
26436 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
26437 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
26439 if (OpWidth == 64)
26440 return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
26441 if (OpWidth == 128)
26442 return Subtarget.hasCmpxchg16b();
26444 return false;
26445 }
26447 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
26448 // TODO: In 32-bit mode, use FISTP when X87 is available?
26449 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
26450 Type *MemType = SI->getValueOperand()->getType();
26452 bool NoImplicitFloatOps =
26453 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
26454 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
26455 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
26456 return false;
26458 return needsCmpXchgNb(MemType);
26461 // Note: this turns large loads into lock cmpxchg8b/16b.
26462 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
26463 TargetLowering::AtomicExpansionKind
26464 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
26465 Type *MemType = LI->getType();
26467 // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
26468 // can use movq to do the load. If we have X87 we can load into an 80-bit
26469 // X87 register and store it to a stack temporary.
26470 bool NoImplicitFloatOps =
26471 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
26472 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
26473 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
26474 (Subtarget.hasSSE2() || Subtarget.hasX87()))
26475 return AtomicExpansionKind::None;
26477 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
26478 : AtomicExpansionKind::None;
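// For example, an atomic load of i64 on a 32-bit SSE2 target stays a plain
// atomic load (later matched to MOVQ), while an atomic load of i128 on a
// 64-bit target with cmpxchg16b reports CmpXChg and is expanded to a
// lock cmpxchg16b loop.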
26481 TargetLowering::AtomicExpansionKind
26482 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
26483 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26484 Type *MemType = AI->getType();
26486 // If the operand is too big, we must see if cmpxchg8/16b is available
26487 // and default to library calls otherwise.
26488 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
26489 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
26490 : AtomicExpansionKind::None;
26493 AtomicRMWInst::BinOp Op = AI->getOperation();
26494 switch (Op) {
26495 default:
26496 llvm_unreachable("Unknown atomic operation");
26497 case AtomicRMWInst::Xchg:
26498 case AtomicRMWInst::Add:
26499 case AtomicRMWInst::Sub:
26500 // It's better to use xadd, xsub or xchg for these in all cases.
26501 return AtomicExpansionKind::None;
26502 case AtomicRMWInst::Or:
26503 case AtomicRMWInst::And:
26504 case AtomicRMWInst::Xor:
26505 // If the atomicrmw's result isn't actually used, we can just add a "lock"
26506 // prefix to a normal instruction for these operations.
26507 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
26508 : AtomicExpansionKind::None;
26509 case AtomicRMWInst::Nand:
26510 case AtomicRMWInst::Max:
26511 case AtomicRMWInst::Min:
26512 case AtomicRMWInst::UMax:
26513 case AtomicRMWInst::UMin:
26514 case AtomicRMWInst::FAdd:
26515 case AtomicRMWInst::FSub:
26516 // These always require a non-trivial set of data operations on x86. We must
26517 // use a cmpxchg loop.
26518 return AtomicExpansionKind::CmpXChg;
26519 }
26520 }
26522 LoadInst *
26523 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
26524 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26525 Type *MemType = AI->getType();
26526 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
26527 // there is no benefit in turning such RMWs into loads, and it is actually
26528 // harmful as it introduces an mfence.
26529 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
26530 return nullptr;
26532 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
26533 // lowering available in lowerAtomicArith.
26534 // TODO: push more cases through this path.
26535 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
26536 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
26537 AI->use_empty())
26538 return nullptr;
26540 auto Builder = IRBuilder<>(AI);
26541 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
26542 auto SSID = AI->getSyncScopeID();
26543 // We must restrict the ordering to avoid generating loads with Release or
26544 // ReleaseAcquire orderings.
26545 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
26547 // Before the load we need a fence. Here is an example lifted from
26548 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
26549 // is required:
26550 // Thread 0:
26551 // x.store(1, relaxed);
26552 // r1 = y.fetch_add(0, release);
26553 // Thread 1:
26554 // y.fetch_add(42, acquire);
26555 // r2 = x.load(relaxed);
26556 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
26557 // lowered to just a load without a fence. A mfence flushes the store buffer,
26558 // making the optimization clearly correct.
26559 // FIXME: the fence is required if isReleaseOrStronger(Order), but it is
26560 // not clearly needed otherwise, so we might be able to be more aggressive
26561 // on relaxed idempotent rmw. In practice, they do not look useful, so we
26562 // don't try to be especially clever.
26563 if (SSID == SyncScope::SingleThread)
26564 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
26565 // the IR level, so we must wrap it in an intrinsic.
26566 return nullptr;
26568 if (!Subtarget.hasMFence())
26569 // FIXME: it might make sense to use a locked operation here but on a
26570 // different cache-line to prevent cache-line bouncing. In practice it
26571 // is probably a small win, and x86 processors without mfence are rare
26572 // enough that we do not bother.
26573 return nullptr;
26575 Function *MFence =
26576 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
26577 Builder.CreateCall(MFence, {});
26579 // Finally we can emit the atomic load.
26580 LoadInst *Loaded =
26581 Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
26582 AI->getType()->getPrimitiveSizeInBits());
26583 Loaded->setAtomic(Order, SSID);
26584 AI->replaceAllUsesWith(Loaded);
26585 AI->eraseFromParent();
26586 return Loaded;
26587 }
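// A sketch of the IR transform lowerIdempotentRMWIntoFencedLoad performs
// (assuming a non-SingleThread seq_cst rmw on a target with MFENCE):
//   %r = atomicrmw or i32* %p, i32 0 seq_cst
// becomes roughly:
//   call void @llvm.x86.sse2.mfence()
//   %r = load atomic i32, i32* %p seq_cst, align 4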
26589 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
26590 if (!SI.isUnordered())
26591 return false;
26592 return ExperimentalUnorderedISEL;
26593 }
26594 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
26595 if (!LI.isUnordered())
26596 return false;
26597 return ExperimentalUnorderedISEL;
26598 }
26601 /// Emit a locked operation on a stack location which does not change any
26602 /// memory location, but does involve a lock prefix. Location is chosen to be
26603 /// a) very likely accessed only by a single thread to minimize cache traffic,
26604 /// and b) definitely dereferenceable. Returns the new Chain result.
26605 static SDValue emitLockedStackOp(SelectionDAG &DAG,
26606 const X86Subtarget &Subtarget,
26607 SDValue Chain, SDLoc DL) {
26608 // Implementation notes:
26609 // 1) LOCK prefix creates a full read/write reordering barrier for memory
26610 // operations issued by the current processor. As such, the location
26611 // referenced is not relevant for the ordering properties of the instruction.
26612 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
26613 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
26614 // 2) Using an immediate operand appears to be the best encoding choice
26615 // here since it doesn't require an extra register.
26616 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
26617 // is small enough it might just be measurement noise.)
26618 // 4) When choosing offsets, there are several contributing factors:
26619 // a) If there's no redzone, we default to TOS. (We could allocate a cache
26620 // line aligned stack object to improve this case.)
26621 // b) To minimize our chances of introducing a false dependence, we prefer
26622 // to offset the stack usage from TOS slightly.
26623 // c) To minimize concerns about cross thread stack usage - in particular,
26624 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
26625 // captures state in the TOS frame and accesses it from many threads -
26626 // we want to use an offset such that the offset is in a distinct cache
26627 // line from the TOS frame.
26629 // For a general discussion of the tradeoffs and benchmark results, see:
26630 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
26632 auto &MF = DAG.getMachineFunction();
26633 auto &TFL = *Subtarget.getFrameLowering();
26634 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
26636 if (Subtarget.is64Bit()) {
26637 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26638 SDValue Ops[] = {
26639 DAG.getRegister(X86::RSP, MVT::i64), // Base
26640 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26641 DAG.getRegister(0, MVT::i64), // Index
26642 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26643 DAG.getRegister(0, MVT::i16), // Segment.
26644 Zero,
26645 Chain};
26646 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26647 MVT::Other, Ops);
26648 return SDValue(Res, 1);
26649 }
26651 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26652 SDValue Ops[] = {
26653 DAG.getRegister(X86::ESP, MVT::i32), // Base
26654 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26655 DAG.getRegister(0, MVT::i32), // Index
26656 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26657 DAG.getRegister(0, MVT::i16), // Segment.
26658 Zero,
26659 Chain};
26661 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26662 MVT::Other, Ops);
26663 return SDValue(Res, 1);
26664 }
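// The nodes built by emitLockedStackOp ultimately emit e.g.
// "lock orl $0x0, -64(%rsp)" when a 128-byte red zone is available, or
// "lock orl $0x0, (%esp)" on 32-bit targets where SPOffset is 0.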
26666 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
26667 SelectionDAG &DAG) {
26668 SDLoc dl(Op);
26669 AtomicOrdering FenceOrdering =
26670 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
26671 SyncScope::ID FenceSSID =
26672 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
26674 // The only fence that needs an instruction is a sequentially-consistent
26675 // cross-thread fence.
26676 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
26677 FenceSSID == SyncScope::System) {
26678 if (Subtarget.hasMFence())
26679 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
26681 SDValue Chain = Op.getOperand(0);
26682 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
26685 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
26686 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
26689 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
26690 SelectionDAG &DAG) {
26691 MVT T = Op.getSimpleValueType();
26692 SDLoc DL(Op);
26693 unsigned Reg = 0;
26694 unsigned size = 0;
26695 switch(T.SimpleTy) {
26696 default: llvm_unreachable("Invalid value type!");
26697 case MVT::i8: Reg = X86::AL; size = 1; break;
26698 case MVT::i16: Reg = X86::AX; size = 2; break;
26699 case MVT::i32: Reg = X86::EAX; size = 4; break;
26700 case MVT::i64:
26701 assert(Subtarget.is64Bit() && "Node not type legal!");
26702 Reg = X86::RAX; size = 8;
26703 break;
26704 }
26705 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
26706 Op.getOperand(2), SDValue());
26707 SDValue Ops[] = { cpIn.getValue(0),
26708 Op.getOperand(1),
26709 Op.getOperand(3),
26710 DAG.getTargetConstant(size, DL, MVT::i8),
26711 cpIn.getValue(1) };
26712 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26713 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
26714 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
26715 Ops, T, MMO);
26717 SDValue cpOut =
26718 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
26719 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
26720 MVT::i32, cpOut.getValue(2));
26721 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
26723 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26724 cpOut, Success, EFLAGS.getValue(1));
26727 // Create MOVMSKB, taking into account whether we need to split for AVX1.
26728 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
26729 const X86Subtarget &Subtarget) {
26730 MVT InVT = V.getSimpleValueType();
26732 if (InVT == MVT::v64i8) {
26733 SDValue Lo, Hi;
26734 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
26735 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
26736 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
26737 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
26738 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
26739 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
26740 DAG.getConstant(32, DL, MVT::i8));
26741 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
26743 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
26744 SDValue Lo, Hi;
26745 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
26746 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
26747 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
26748 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
26749 DAG.getConstant(16, DL, MVT::i8));
26750 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
26753 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
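// e.g. a v32i8 input on AVX1 is split into two v16i8 halves, each reduced
// with PMOVMSKB to a 16-bit mask in an i32, and recombined as
// Lo | (Hi << 16) to form the full 32-bit mask.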
26756 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
26757 SelectionDAG &DAG) {
26758 SDValue Src = Op.getOperand(0);
26759 MVT SrcVT = Src.getSimpleValueType();
26760 MVT DstVT = Op.getSimpleValueType();
26762 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
26763 // half to v32i1 and concatenating the result.
26764 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
26765 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
26766 assert(Subtarget.hasBWI() && "Expected BWI target");
26767 SDLoc dl(Op);
26768 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26769 DAG.getIntPtrConstant(0, dl));
26770 Lo = DAG.getBitcast(MVT::v32i1, Lo);
26771 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26772 DAG.getIntPtrConstant(1, dl));
26773 Hi = DAG.getBitcast(MVT::v32i1, Hi);
26774 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
26777 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
26778 if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
26779 DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
26780 SDLoc dl(Op);
26781 SDValue Lo, Hi;
26782 std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
26783 MVT CastVT = DstVT.getHalfNumVectorElementsVT();
26784 Lo = DAG.getBitcast(CastVT, Lo);
26785 Hi = DAG.getBitcast(CastVT, Hi);
26786 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
26789 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
26790 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
26791 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
26792 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
26793 SDLoc DL(Op);
26794 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
26795 V = getPMOVMSKB(DL, V, DAG, Subtarget);
26796 return DAG.getZExtOrTrunc(V, DL, DstVT);
26799 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
26800 SrcVT == MVT::i64) && "Unexpected VT!");
26802 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
26803 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
26804 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
26805 // This conversion needs to be expanded.
26806 return SDValue();
26808 SDLoc dl(Op);
26809 if (SrcVT.isVector()) {
26810 // Widen the input vector in the case of MVT::v2i32.
26811 // Example: from MVT::v2i32 to MVT::v4i32.
26812 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
26813 SrcVT.getVectorNumElements() * 2);
26814 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
26815 DAG.getUNDEF(SrcVT));
26816 } else {
26817 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
26818 "Unexpected source type in LowerBITCAST");
26819 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
26822 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
26823 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
26825 if (DstVT == MVT::x86mmx)
26826 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
26828 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
26829 DAG.getIntPtrConstant(0, dl));
26832 /// Compute the horizontal sum of bytes in V for the elements of VT.
26834 /// Requires V to be a byte vector and VT to be an integer vector type with
26835 /// wider elements than V's type. The width of the elements of VT determines
26836 /// how many bytes of V are summed horizontally to produce each element of the
26838 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
26839 const X86Subtarget &Subtarget,
26840 SelectionDAG &DAG) {
26841 SDLoc DL(V);
26842 MVT ByteVecVT = V.getSimpleValueType();
26843 MVT EltVT = VT.getVectorElementType();
26844 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
26845 "Expected value to have byte element type.");
26846 assert(EltVT != MVT::i8 &&
26847 "Horizontal byte sum only makes sense for wider elements!");
26848 unsigned VecSize = VT.getSizeInBits();
26849 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
26851 // The PSADBW instruction horizontally adds all bytes and leaves the result
26852 // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
26853 if (EltVT == MVT::i64) {
26854 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
26855 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26856 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
26857 return DAG.getBitcast(VT, V);
26860 if (EltVT == MVT::i32) {
26861 // We unpack the low half and high half into i32s interleaved with zeros so
26862 // that we can use PSADBW to horizontally sum them. The most useful part of
26863 // this is that it lines up the results of two PSADBW instructions to be
26864 // two v2i64 vectors which concatenated are the 4 population counts. We can
26865 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
26866 SDValue Zeros = DAG.getConstant(0, DL, VT);
26867 SDValue V32 = DAG.getBitcast(VT, V);
26868 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
26869 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
26871 // Do the horizontal sums into two v2i64s.
26872 Zeros = DAG.getConstant(0, DL, ByteVecVT);
26873 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26874 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26875 DAG.getBitcast(ByteVecVT, Low), Zeros);
26876 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26877 DAG.getBitcast(ByteVecVT, High), Zeros);
26879 // Merge them together.
26880 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
26881 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
26882 DAG.getBitcast(ShortVecVT, Low),
26883 DAG.getBitcast(ShortVecVT, High));
26885 return DAG.getBitcast(VT, V);
26888 // The only element type left is i16.
26889 assert(EltVT == MVT::i16 && "Unknown how to handle type");
26891 // To obtain pop count for each i16 element starting from the pop count for
26892 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
26893 // right by 8. It is important to shift as i16s as i8 vector shift isn't
26894 // directly supported.
26895 SDValue ShifterV = DAG.getConstant(8, DL, VT);
26896 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26897 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
26898 DAG.getBitcast(ByteVecVT, V));
26899 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
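// Worked example for one i16 lane holding byte pop counts <hi, lo>:
// the i16 << 8 yields <lo, 0>, the byte-wise ADD with the original yields
// <hi + lo, lo>, and the final logical i16 >> 8 leaves hi + lo, the lane's
// pop count, in the low byte.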
26902 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
26903 const X86Subtarget &Subtarget,
26904 SelectionDAG &DAG) {
26905 MVT VT = Op.getSimpleValueType();
26906 MVT EltVT = VT.getVectorElementType();
26907 int NumElts = VT.getVectorNumElements();
26909 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
26911 // Implement a lookup table in register by using an algorithm based on:
26912 // http://wm.ite.pl/articles/sse-popcount.html
26914 // The general idea is that every lower byte nibble in the input vector is an
26915 // index into an in-register pre-computed pop count table. We then split up
26916 // the input vector into two new ones: (1) a vector with only the
26917 // shifted-right higher nibbles for each byte and (2) a vector with the
26918 // lower nibbles (and masked-out higher ones) for each byte. PSHUFB is used
26919 // separately with both to index the in-register table. Next, both are added
26920 // and the result is an i8 vector where each element holds the pop count for its input byte.
26921 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
26922 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
26923 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
26924 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
26926 SmallVector<SDValue, 64> LUTVec;
26927 for (int i = 0; i < NumElts; ++i)
26928 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
26929 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
26930 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
26932 // High nibbles
26933 SDValue FourV = DAG.getConstant(4, DL, VT);
26934 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
26936 // Low nibbles
26937 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
26939 // The input vector is used as the shuffle mask that indexes elements into
26940 // the LUT. After counting low and high nibbles, add the vectors to obtain
26941 // the final pop count per i8 element.
26942 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
26943 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
26944 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
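// Worked example for the byte 0xB7 (0b10110111): the low nibble 0x7 looks
// up LUT[0x7] = 3, the high nibble 0xB looks up LUT[0xB] = 3, and the ADD
// produces popcount(0xB7) = 6.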
26947 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
26948 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
26949 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26950 SelectionDAG &DAG) {
26951 MVT VT = Op.getSimpleValueType();
26952 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
26953 "Unknown CTPOP type to handle");
26954 SDLoc DL(Op.getNode());
26955 SDValue Op0 = Op.getOperand(0);
26957 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
26958 if (Subtarget.hasVPOPCNTDQ()) {
26959 unsigned NumElems = VT.getVectorNumElements();
26960 assert((VT.getVectorElementType() == MVT::i8 ||
26961 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
26962 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
26963 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
26964 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
26965 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
26966 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
26970 // Decompose 256-bit ops into smaller 128-bit ops.
26971 if (VT.is256BitVector() && !Subtarget.hasInt256())
26972 return Lower256IntUnary(Op, DAG);
26974 // Decompose 512-bit ops into smaller 256-bit ops.
26975 if (VT.is512BitVector() && !Subtarget.hasBWI())
26976 return Lower512IntUnary(Op, DAG);
26978 // For element types greater than i8, do vXi8 pop counts and a bytesum.
26979 if (VT.getScalarType() != MVT::i8) {
26980 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
26981 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
26982 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
26983 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
26986 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
26987 if (!Subtarget.hasSSSE3())
26988 return SDValue();
26990 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
26993 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26994 SelectionDAG &DAG) {
26995 assert(Op.getSimpleValueType().isVector() &&
26996 "We only do custom lowering for vector population count.");
26997 return LowerVectorCTPOP(Op, Subtarget, DAG);
27000 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
27001 MVT VT = Op.getSimpleValueType();
27002 SDValue In = Op.getOperand(0);
27003 SDLoc DL(Op);
27005 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
27006 // perform the BITREVERSE.
27007 if (!VT.isVector()) {
27008 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
27009 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
27010 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
27011 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
27012 DAG.getIntPtrConstant(0, DL));
27015 int NumElts = VT.getVectorNumElements();
27016 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
27018 // Decompose 256-bit ops into smaller 128-bit ops.
27019 if (VT.is256BitVector())
27020 return Lower256IntUnary(Op, DAG);
27022 assert(VT.is128BitVector() &&
27023 "Only 128-bit vector bitreverse lowering supported.");
27025 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
27026 // perform the BSWAP in the shuffle.
27027 // It's best to shuffle using the second operand as this will implicitly
27028 // allow memory folding for multiple vectors.
27029 SmallVector<SDValue, 16> MaskElts;
27030 for (int i = 0; i != NumElts; ++i) {
27031 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
27032 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
27033 int PermuteByte = SourceByte | (2 << 5);
27034 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
27038 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
27039 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
27040 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
27041 Res, Mask);
27042 return DAG.getBitcast(VT, Res);
27045 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
27046 SelectionDAG &DAG) {
27047 MVT VT = Op.getSimpleValueType();
27049 if (Subtarget.hasXOP() && !VT.is512BitVector())
27050 return LowerBITREVERSE_XOP(Op, DAG);
27052 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
27054 SDValue In = Op.getOperand(0);
27055 SDLoc DL(Op);
27057 // Split v8i64/v16i32 without BWI so that we can still use the PSHUFB
27058 // lowering.
27059 if (VT == MVT::v8i64 || VT == MVT::v16i32) {
27060 assert(!Subtarget.hasBWI() && "BWI should Expand BITREVERSE");
27061 return Lower512IntUnary(Op, DAG);
27064 unsigned NumElts = VT.getVectorNumElements();
27065 assert(VT.getScalarType() == MVT::i8 &&
27066 "Only byte vector BITREVERSE supported");
27068 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
27069 if (VT.is256BitVector() && !Subtarget.hasInt256())
27070 return Lower256IntUnary(Op, DAG);
27072 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
27073 // two nibbles and a PSHUFB lookup to find the bitreverse of each
27074 // 0-15 value (moved to the other nibble).
27075 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
27076 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
27077 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
27079 const int LoLUT[16] = {
27080 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
27081 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
27082 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
27083 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
27084 const int HiLUT[16] = {
27085 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
27086 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
27087 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
27088 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
27090 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
27091 for (unsigned i = 0; i < NumElts; ++i) {
27092 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
27093 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
27096 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
27097 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
27098 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
27099 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
27100 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
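// Worked example for the byte 0xB7 (0b10110111): LoLUT[0x7] = 0xE0 is the
// low nibble reversed into the high nibble, HiLUT[0xB] = 0x0D is the high
// nibble reversed into the low nibble, and the OR gives 0xED (0b11101101),
// the full bit-reversal.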
27103 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
27104 const X86Subtarget &Subtarget) {
27105 unsigned NewOpc = 0;
27106 switch (N->getOpcode()) {
27107 case ISD::ATOMIC_LOAD_ADD:
27108 NewOpc = X86ISD::LADD;
27109 break;
27110 case ISD::ATOMIC_LOAD_SUB:
27111 NewOpc = X86ISD::LSUB;
27112 break;
27113 case ISD::ATOMIC_LOAD_OR:
27114 NewOpc = X86ISD::LOR;
27115 break;
27116 case ISD::ATOMIC_LOAD_XOR:
27117 NewOpc = X86ISD::LXOR;
27118 break;
27119 case ISD::ATOMIC_LOAD_AND:
27120 NewOpc = X86ISD::LAND;
27121 break;
27122 default:
27123 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
27124 }
27126 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
27128 return DAG.getMemIntrinsicNode(
27129 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
27130 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
27131 /*MemVT=*/N->getSimpleValueType(0), MMO);
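// e.g. an "atomicrmw add" whose result is unused becomes an X86ISD::LADD
// memory intrinsic here and is ultimately emitted as a LOCK-prefixed
// memory add such as "lock addl %reg, (%mem)".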
27134 /// Lower atomic_load_ops into LOCK-prefixed operations.
27135 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
27136 const X86Subtarget &Subtarget) {
27137 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
27138 SDValue Chain = N->getOperand(0);
27139 SDValue LHS = N->getOperand(1);
27140 SDValue RHS = N->getOperand(2);
27141 unsigned Opc = N->getOpcode();
27142 MVT VT = N->getSimpleValueType(0);
27143 SDLoc DL(N);
27145 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
27146 // can only be lowered when the result is unused. They should have already
27147 // been transformed into a cmpxchg loop in AtomicExpand.
27148 if (N->hasAnyUseOfValue(0)) {
27149 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
27150 // select LXADD if LOCK_SUB can't be selected.
27151 if (Opc == ISD::ATOMIC_LOAD_SUB) {
27152 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
27153 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
27154 RHS, AN->getMemOperand());
27156 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
27157 "Used AtomicRMW ops other than Add should have been expanded!");
27161 // Specialized lowering for the canonical form of an idempotent atomicrmw.
27162 // The core idea here is that since the memory location isn't actually
27163 // changing, all we need is a lowering for the *ordering* impacts of the
27164 // atomicrmw. As such, we can choose a different operation and memory
27165 // location to minimize impact on other code.
27166 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
27167 // On X86, the only ordering which actually requires an instruction is
27168 // seq_cst outside of SingleThread scope; everything else just needs to be
27169 // preserved during codegen and then dropped. Note that we expect (but don't
27170 // assume) that orderings other than seq_cst and acq_rel have been
27171 // canonicalized to a store or load.
27172 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
27173 AN->getSyncScopeID() == SyncScope::System) {
27174 // Prefer a locked operation against a stack location to minimize cache
27175 // traffic. This assumes that stack locations are very likely to be
27176 // accessed only by the owning thread.
27177 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
27178 assert(!N->hasAnyUseOfValue(0));
27179 // NOTE: The getUNDEF is needed to give something for the unused result 0.
27180 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
27181 DAG.getUNDEF(VT), NewChain);
27183 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
27184 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
27185 assert(!N->hasAnyUseOfValue(0));
27186 // NOTE: The getUNDEF is needed to give something for the unused result 0.
27187 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
27188 DAG.getUNDEF(VT), NewChain);
27191 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
27192 // RAUW the chain, but don't worry about the result, as it's unused.
27193 assert(!N->hasAnyUseOfValue(0));
27194 // NOTE: The getUNDEF is needed to give something for the unused result 0.
27195 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
27196 DAG.getUNDEF(VT), LockOp.getValue(1));
27199 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
27200 const X86Subtarget &Subtarget) {
27201 auto *Node = cast<AtomicSDNode>(Op.getNode());
27202 SDLoc dl(Node);
27203 EVT VT = Node->getMemoryVT();
27205 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
27206 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
27208 // If this store is not sequentially consistent and the type is legal
27209 // we can just keep it.
27210 if (!IsSeqCst && IsTypeLegal)
27211 return Op;
27213 if (VT == MVT::i64 && !IsTypeLegal) {
27214 // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
27215 // FIXME: Use movlps with SSE1.
27216 // FIXME: Use fist with X87.
27217 bool NoImplicitFloatOps =
27218 DAG.getMachineFunction().getFunction().hasFnAttribute(
27219 Attribute::NoImplicitFloat);
27220 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27221 Subtarget.hasSSE2()) {
27222 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
27223 Node->getOperand(2));
27224 SDVTList Tys = DAG.getVTList(MVT::Other);
27225 SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
27226 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
27228 Node->getMemOperand());
27230 // If this is a sequentially consistent store, also emit an appropriate
27231 // fence.
27232 if (IsSeqCst)
27233 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
27234 return Chain;
27235 }
27236 }
27239 // Convert seq_cst store -> xchg
27240 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
27241 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
27242 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
27243 Node->getMemoryVT(),
27244 Node->getOperand(0),
27245 Node->getOperand(1), Node->getOperand(2),
27246 Node->getMemOperand());
27247 return Swap.getValue(1);
27250 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
27251 SDNode *N = Op.getNode();
27252 MVT VT = N->getSimpleValueType(0);
27254 // Let legalize expand this if it isn't a legal type yet.
27255 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
27256 return SDValue();
27258 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
27259 SDLoc DL(N);
27261 // Set the carry flag.
27262 SDValue Carry = Op.getOperand(2);
27263 EVT CarryVT = Carry.getValueType();
27264 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
27265 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
27266 Carry, DAG.getConstant(NegOne, DL, CarryVT));
27268 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
27269 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
27270 Op.getOperand(1), Carry.getValue(1));
27272 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
27273 if (N->getValueType(1) == MVT::i1)
27274 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
27276 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
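// e.g. for ISD::ADDCARRY the incoming carry (0 or 1) is materialized in
// EFLAGS by computing carry + (-1), which sets CF exactly when the carry
// is nonzero; X86ISD::ADC then consumes that CF and the outgoing carry is
// read back with SETB via getSETCC(X86::COND_B, ...).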
27279 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
27280 SelectionDAG &DAG) {
27281 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
27283 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
27284 // which returns the values as { float, float } (in XMM0) or
27285 // { double, double } (which is returned in XMM0, XMM1).
27286 SDLoc dl(Op);
27287 SDValue Arg = Op.getOperand(0);
27288 EVT ArgVT = Arg.getValueType();
27289 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
27291 TargetLowering::ArgListTy Args;
27292 TargetLowering::ArgListEntry Entry;
27294 Entry.Node = Arg;
27295 Entry.Ty = ArgTy;
27296 Entry.IsSExt = false;
27297 Entry.IsZExt = false;
27298 Args.push_back(Entry);
27300 bool isF64 = ArgVT == MVT::f64;
27301 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
27302 // the small struct {f32, f32} is returned in (eax, edx). For f64,
27303 // the results are returned via SRet in memory.
27304 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27305 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
27306 const char *LibcallName = TLI.getLibcallName(LC);
27307 SDValue Callee =
27308 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
27310 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
27311 : (Type *)VectorType::get(ArgTy, 4);
27313 TargetLowering::CallLoweringInfo CLI(DAG);
27314 CLI.setDebugLoc(dl)
27315 .setChain(DAG.getEntryNode())
27316 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
27318 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
27320 if (isF64)
27321 // Returned in xmm0 and xmm1.
27322 return CallResult.first;
27324 // Returned in bits 0:31 and 32:63 of xmm0.
27325 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
27326 CallResult.first, DAG.getIntPtrConstant(0, dl));
27327 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
27328 CallResult.first, DAG.getIntPtrConstant(1, dl));
27329 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
27330 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
27333 /// Widen a vector input to a vector of NVT. The
27334 /// input vector must have the same element type as NVT.
27335 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
27336 bool FillWithZeroes = false) {
27337 // Check if InOp already has the right width.
27338 MVT InVT = InOp.getSimpleValueType();
27339 if (InVT == NVT)
27340 return InOp;
27342 if (InOp.isUndef())
27343 return DAG.getUNDEF(NVT);
27345 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
27346 "input and widen element type must match");
27348 unsigned InNumElts = InVT.getVectorNumElements();
27349 unsigned WidenNumElts = NVT.getVectorNumElements();
27350 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
27351 "Unexpected request for vector widening");
27353 SDLoc dl(InOp);
27354 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
27355 InOp.getNumOperands() == 2) {
27356 SDValue N1 = InOp.getOperand(1);
27357 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
27358 N1.isUndef()) {
27359 InOp = InOp.getOperand(0);
27360 InVT = InOp.getSimpleValueType();
27361 InNumElts = InVT.getVectorNumElements();
27364 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
27365 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
27366 SmallVector<SDValue, 16> Ops;
27367 for (unsigned i = 0; i < InNumElts; ++i)
27368 Ops.push_back(InOp.getOperand(i));
27370 EVT EltVT = InOp.getOperand(0).getValueType();
27372 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
27373 DAG.getUNDEF(EltVT);
27374 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
27375 Ops.push_back(FillVal);
27376 return DAG.getBuildVector(NVT, dl, Ops);
27378 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
27379 DAG.getUNDEF(NVT);
27380 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
27381 InOp, DAG.getIntPtrConstant(0, dl));
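// e.g. widening a v2i32 value <a, b> to v8i32 with FillWithZeroes yields
// <a, b, 0, 0, 0, 0, 0, 0>; without FillWithZeroes the upper six elements
// are undef instead.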
27384 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
27385 SelectionDAG &DAG) {
27386 assert(Subtarget.hasAVX512() &&
27387 "MGATHER/MSCATTER are supported on AVX-512 arch only");
27389 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
27390 SDValue Src = N->getValue();
27391 MVT VT = Src.getSimpleValueType();
27392 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
27393 SDLoc dl(Op);
27395 SDValue Scale = N->getScale();
27396 SDValue Index = N->getIndex();
27397 SDValue Mask = N->getMask();
27398 SDValue Chain = N->getChain();
27399 SDValue BasePtr = N->getBasePtr();
27401 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
27402 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
27403 // If the index is v2i64 and we have VLX we can use xmm for data and index.
27404 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
27405 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27406 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
27407 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
27408 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
27409 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
27410 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
27411 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
27412 return SDValue(NewScatter.getNode(), 1);
27413 }
27414 return SDValue();
27415 }
27417 MVT IndexVT = Index.getSimpleValueType();
27418 MVT MaskVT = Mask.getSimpleValueType();
27420 // If the index is v2i32, we're being called by type legalization and we
27421 // should just let the default handling take care of it.
27422 if (IndexVT == MVT::v2i32)
27423 return SDValue();
27425 // If we don't have VLX and neither the passthru or index is 512-bits, we
27426 // need to widen until one is.
  if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
      !Index.getSimpleValueType().is512BitVector()) {
    // Determine how much we need to widen by to get a 512-bit type.
    unsigned Factor = std::min(512/VT.getSizeInBits(),
                               512/IndexVT.getSizeInBits());
    unsigned NumElts = VT.getVectorNumElements() * Factor;

    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);

    Src = ExtendToType(Src, VT, DAG);
    Index = ExtendToType(Index, IndexVT, DAG);
    Mask = ExtendToType(Mask, MaskVT, DAG, true);
  }

  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
  SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
      VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
  return SDValue(NewScatter.getNode(), 1);
}

static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDValue Mask = N->getMask();
  MVT MaskVT = Mask.getSimpleValueType();
  SDValue PassThru = N->getPassThru();
  SDLoc dl(Op);

  // Handle AVX masked loads which don't support passthru other than 0.
  if (MaskVT.getVectorElementType() != MVT::i1) {
    // We also allow undef in the isel pattern.
    if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
      return Op;

    SDValue NewLoad = DAG.getMaskedLoad(VT, dl, N->getChain(),
                                        N->getBasePtr(), Mask,
                                        getZeroVector(VT, Subtarget, DAG, dl),
                                        N->getMemoryVT(), N->getMemOperand(),
                                        N->getExtensionType(),
                                        N->isExpandingLoad());
    // Emit a blend.
    SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
                                 PassThru);
    return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
  }

  assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
         "Expanding masked load is supported on AVX-512 target only!");

  assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
         "Expanding masked load is supported for 32 and 64-bit types only!");

  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
         "Cannot lower masked load op.");

  assert((ScalarVT.getSizeInBits() >= 32 ||
          (Subtarget.hasBWI() &&
              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
         "Unsupported masked load op.");

  // This operation is legal for targets with VLX, but without
  // VLX the vector should be widened to 512 bits.
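  // E.g. (a sketch): a masked v8i32 load on AVX512F becomes a v16i32 masked
  // load whose upper eight v16i1 mask bits are zero, and the original v8i32
  // value is extracted back out below.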
  unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
  PassThru = ExtendToType(PassThru, WideDataVT, DAG);

  // Mask element has to be i1.
  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
         "Unexpected mask type");

  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);

  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
  SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
                                      N->getBasePtr(), Mask, PassThru,
                                      N->getMemoryVT(), N->getMemOperand(),
                                      N->getExtensionType(),
                                      N->isExpandingLoad());

  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                NewLoad.getValue(0),
                                DAG.getIntPtrConstant(0, dl));
  SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
  return DAG.getMergeValues(RetOps, dl);
}

static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
  SDValue DataToStore = N->getValue();
  MVT VT = DataToStore.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDValue Mask = N->getMask();
  SDLoc dl(Op);

  assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
         "Compressing masked store is supported on AVX-512 target only!");

  assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
         "Compressing masked store is supported for 32 and 64-bit types only!");

  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
         "Cannot lower masked store op.");

  assert((ScalarVT.getSizeInBits() >= 32 ||
          (Subtarget.hasBWI() &&
              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
         "Unsupported masked store op.");

  // This operation is legal for targets with VLX, but without
  // VLX the vector should be widened to 512 bits.
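  // E.g. (a sketch): a masked v8f32 store on AVX512F widens to a v16f32 store
  // with a zero-extended v16i1 mask, so the added lanes are never written.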
  unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);

  // Mask element has to be i1.
  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
         "Unexpected mask type");

  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);

  DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
  return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
                            Mask, N->getMemoryVT(), N->getMemOperand(),
                            N->isTruncatingStore(), N->isCompressingStore());
}

static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");

  MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue Index = N->getIndex();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  MVT IndexVT = Index.getSimpleValueType();
  MVT MaskVT = Mask.getSimpleValueType();

  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");

  // If the index is v2i32, we're being called by type legalization.
  if (IndexVT == MVT::v2i32)
    return SDValue();

  // If we don't have VLX and neither the passthru nor the index is 512 bits,
  // we need to widen until one is.
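  // For example (a sketch): a v4f32 gather with a v4i32 index widens by
  // Factor = min(512/128, 512/128) = 4 to a v16f32 gather with a v16i32
  // index; the zero-filled mask tail keeps the extra lanes from loading, and
  // the original v4f32 result is extracted back out at the end.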
  MVT OrigVT = VT;
  if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
      !IndexVT.is512BitVector()) {
    // Determine how much we need to widen by to get a 512-bit type.
    unsigned Factor = std::min(512/VT.getSizeInBits(),
                               512/IndexVT.getSizeInBits());

    unsigned NumElts = VT.getVectorNumElements() * Factor;

    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);

    PassThru = ExtendToType(PassThru, VT, DAG);
    Index = ExtendToType(Index, IndexVT, DAG);
    Mask = ExtendToType(Mask, MaskVT, DAG, true);
  }

  SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
                    N->getScale() };
  SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
      DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
      N->getMemOperand());
  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
                                NewGather, DAG.getIntPtrConstant(0, dl));
  return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
}

SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // TODO: Eventually, the lowering of these nodes should be informed by or
  // deferred to the GC strategy for the function in which they appear. For
  // now, however, they must be lowered to something. Since they are logically
  // no-ops in the case of a null GC strategy (or a GC strategy which does not
  // require special handling for these nodes), lower them as literal NOOPs for
  // the time being.
  SmallVector<SDValue, 2> Ops;

  Ops.push_back(Op.getOperand(0));
  if (Op->getGluedNode())
    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));

  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);

  return NOOP;
}

SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // TODO: Eventually, the lowering of these nodes should be informed by or
  // deferred to the GC strategy for the function in which they appear. For
  // now, however, they must be lowered to something. Since they are logically
  // no-ops in the case of a null GC strategy (or a GC strategy which does not
  // require special handling for these nodes), lower them as literal NOOPs for
  // the time being.
  SmallVector<SDValue, 2> Ops;

  Ops.push_back(Op.getOperand(0));
  if (Op->getGluedNode())
    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));

  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);

  return NOOP;
}

SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
                                         RTLIB::Libcall Call) const {
  SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end());
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, Call, MVT::f128, Ops, CallOptions, SDLoc(Op)).first;
}

/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return LowerCMP_SWAP(Op, Subtarget, DAG);
  case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
  case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
  case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, Subtarget, DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::FSHL:
  case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
  case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::STRICT_FP_ROUND: return LowerSTRICT_FP_ROUND(Op, DAG);
  case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
  case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
  case ISD::FADD:
  case ISD::FSUB: return lowerFaddFsub(Op, DAG);
  case ISD::FMUL: return LowerF128Call(Op, DAG, RTLIB::MUL_F128);
  case ISD::FDIV: return LowerF128Call(Op, DAG, RTLIB::DIV_F128);
  case ISD::FABS:
  case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH:
    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
  case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
  case ISD::MULHS:
  case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
  case ISD::ROTL:
  case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO: return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget, DAG);
  case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
  case ISD::ADD:
  case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
  case ISD::UADDSAT:
  case ISD::SADDSAT:
  case ISD::USUBSAT:
  case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN: return LowerMINMAX(Op, DAG);
  case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
  case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
  case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
  case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
  case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
  case ISD::GC_TRANSITION_START:
    return LowerGC_TRANSITION_START(Op, DAG);
  case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
  }
}

/// Places new result values for the node in Results (their number
/// and types must exactly match those of the original return values of
/// the node), or leaves Results empty, which indicates that the node is not
/// to be custom lowered after all.
void X86TargetLowering::LowerOperationWrapper(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);

  if (!Res.getNode())
    return;

  // If the original node has one result, take the return value from
  // LowerOperation as is. It might not be result number 0.
  if (N->getNumValues() == 1) {
    Results.push_back(Res);
    return;
  }

  // If the original node has multiple results, then the return node should
  // have the same number of results.
  assert((N->getNumValues() == Res->getNumValues()) &&
         "Lowering returned the wrong number of results!");

  // Place each new result value based on its result number in N.
  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));
}

/// Replace a node with an illegal result type with a new node built out of
/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "ReplaceNodeResults: ";
    N->dump(&DAG);
#endif
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::CTPOP: {
    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
    // Use a v2i64 if possible.
    bool NoImplicitFloatOps =
        DAG.getMachineFunction().getFunction().hasFnAttribute(
            Attribute::NoImplicitFloat);
    if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
      SDValue Wide =
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
      Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
      // Bit count should fit in 32-bits, extract it as that and then zero
      // extend to i64. Otherwise we end up extracting bits 63:32 separately.
      Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
      Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
                         DAG.getIntPtrConstant(0, dl));
      Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
      Results.push_back(Wide);
    }
    return;
  }
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
           VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
    // Pre-promote these to vXi16 to avoid op legalization thinking all 16
    // elements are needed.
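    // E.g. (a sketch): a v4i8 multiply becomes a v4i16 multiply that is
    // truncated back to v4i8 and then widened to v16i8 with undef elements.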
    MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
    SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
    SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
    SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    unsigned NumConcats = 16 / VT.getVectorNumElements();
    SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
    ConcatOps[0] = Res;
    Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
    Results.push_back(Res);
    return;
  }
  case X86ISD::VPMADDWD:
  case X86ISD::AVG: {
    // Legalize types for X86ISD::AVG/VPMADDWD by widening.
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");

    EVT VT = N->getValueType(0);
    EVT InVT = N->getOperand(0).getValueType();
    assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
           "Expected a VT that divides into 128 bits.");
    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
           "Unexpected type action!");
    unsigned NumConcat = 128 / InVT.getSizeInBits();

    EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
                                    InVT.getVectorElementType(),
                                    NumConcat * InVT.getVectorNumElements());
    EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
                                  VT.getVectorElementType(),
                                  NumConcat * VT.getVectorNumElements());

    SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
    Ops[0] = N->getOperand(0);
    SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
    Ops[0] = N->getOperand(1);
    SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);

    SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
    Results.push_back(Res);
    return;
  }
  case ISD::ABS: {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    assert(N->getValueType(0) == MVT::i64 &&
           "Unexpected type (!= i64) on ABS.");
    MVT HalfT = MVT::i32;
    SDValue Lo, Hi, Tmp;
    SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);

    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                     DAG.getConstant(0, dl, HalfT));
    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                     DAG.getConstant(1, dl, HalfT));
    Tmp = DAG.getNode(
        ISD::SRA, dl, HalfT, Hi,
        DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
                        TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
    Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
    Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
                     SDValue(Lo.getNode(), 1));
    Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
    Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
    Results.push_back(Lo);
    Results.push_back(Hi);
    return;
  }
  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
  case X86ISD::FMINC:
  case X86ISD::FMIN:
  case X86ISD::FMAXC:
  case X86ISD::FMAX: {
    EVT VT = N->getValueType(0);
    assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
    SDValue UNDEF = DAG.getUNDEF(VT);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
    return;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: {
    EVT VT = N->getValueType(0);
    if (VT.isVector()) {
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");
      // If this RHS is a constant splat vector we can widen this and let
      // division/remainder by constant optimize it.
      // TODO: Can we do something for non-splat?
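      // E.g. (a sketch): a v2i32 sdiv by a splat of 7 widens to a v4i32 sdiv
      // by <7, 7, 7, 7>, which the divide-by-constant combine can then turn
      // into a multiply and shift sequence.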
      APInt SplatVal;
      if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
        unsigned NumConcats = 128 / VT.getSizeInBits();
        SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
        Ops0[0] = N->getOperand(0);
        EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
        SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
        SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
        SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
        Results.push_back(Res);
      }
      return;
    }

    LLVM_FALLTHROUGH;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue V = LowerWin64_i128OP(SDValue(N, 0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::TRUNCATE: {
    MVT VT = N->getSimpleValueType(0);
    if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
      return;

    // The generic legalizer will try to widen the input type to the same
    // number of elements as the widened result type. But this isn't always
    // the best thing so do some custom legalization to avoid some cases.
    MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
    SDValue In = N->getOperand(0);
    EVT InVT = In.getValueType();

    unsigned InBits = InVT.getSizeInBits();
    if (128 % InBits == 0) {
      // 128-bit and smaller inputs should avoid truncate altogether and
      // just use a build_vector that will become a shuffle.
      // TODO: Widen and use a shuffle directly?
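      // E.g. (a sketch): truncating v2i64 to v2i8 extracts the two i64
      // elements, truncates each scalar to i8, and rebuilds a v16i8 whose
      // remaining lanes are undef.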
      MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
      EVT EltVT = VT.getVectorElementType();
      unsigned WidenNumElts = WidenVT.getVectorNumElements();
      SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
      // Use the original element count so we don't do more scalar opts than
      // necessary.
      unsigned MinElts = VT.getVectorNumElements();
      for (unsigned i = 0; i < MinElts; ++i) {
        SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
                                  DAG.getIntPtrConstant(i, dl));
        Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
      }
      Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
      return;
    }
    // With AVX512 there are some cases that can use a target specific
    // truncate node to go from 256/512 to less than 128 with zeros in the
    // upper elements of the 128 bit result.
    if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
      // We can use VTRUNC directly for 256 bits with VLX, or for any 512-bit
      // input.
      if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
        return;
      }
      // There's one case we can widen to 512 bits and use VTRUNC.
      if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
        In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
                         DAG.getUNDEF(MVT::v4i64));
        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
        return;
      }
    }
    if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
        getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
        isTypeLegal(MVT::v4i64)) {
      // Input needs to be split and output needs to be widened. Let's use two
      // VTRUNCs, and shuffle their results together into the wider type.
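      // E.g. (a sketch): each v4i64 half is narrowed by VTRUNC into the low
      // four bytes of a v16i8, and the shuffle below stitches those two
      // four-byte runs into one v8i8 result (widened to v16i8).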
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(In, dl);

      Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
      Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
      SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
                                         { 0,  1,  2,  3, 16, 17, 18, 19,
                                          -1, -1, -1, -1, -1, -1, -1, -1 });
      Results.push_back(Res);
      return;
    }

    return;
  }
  case ISD::ANY_EXTEND:
    // Right now, only MVT::v8i8 has Custom action for an illegal type.
    // It's intended to custom handle the input type.
    assert(N->getValueType(0) == MVT::v8i8 &&
           "Do not know how to legalize this Node");
    return;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND: {
    EVT VT = N->getValueType(0);
    SDValue In = N->getOperand(0);
    EVT InVT = In.getValueType();
    if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
        (InVT == MVT::v4i16 || InVT == MVT::v4i8)) {
      assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
             "Unexpected type action!");
      assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
      // Custom split this so we can extend i8/i16->i32 invec. This is better
      // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
      // sra, then an extend from i32 to i64 using pcmpgt. By custom splitting
      // we allow the sra from the extend to i32 to be shared by the split.
      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);

      // Fill a vector with sign bits for each element.
      SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
      SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
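      // E.g. (a sketch): for In = {a, b, c, d}, SignBits is {sa, sb, sc, sd}
      // where each s? is all-ones when the element is negative; interleaving
      // gives {a, sa, b, sb} and {c, sc, d, sd}, i.e. the sign-extended i64
      // halves once bitcast to v2i64.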
      // Create an unpackl and unpackh to interleave the sign bits then bitcast
      // to v2i64.
      SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                        {0, 4, 1, 5});
      Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
      SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                        {2, 6, 3, 7});
      Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);

      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    if (VT == MVT::v16i32 || VT == MVT::v8i64) {
      if (!InVT.is128BitVector()) {
        // Not a 128 bit vector, but maybe type legalization will promote
        // it to 128 bits.
        if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
          return;
        InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
        if (!InVT.is128BitVector())
          return;

        // Promote the input to 128 bits. Type legalization will turn this into
        // zext_inreg/sext_inreg.
        In = DAG.getNode(N->getOpcode(), dl, InVT, In);
      }

      // Perform custom splitting instead of the two stage extend we would get
      // by default.
      EVT LoVT, HiVT;
      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
      assert(isTypeLegal(LoVT) && "Split VT not legal?");

      SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);

      // We need to shift the input over by half the number of elements.
      unsigned NumElts = InVT.getVectorNumElements();
      unsigned HalfNumElts = NumElts / 2;
      SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
      for (unsigned i = 0; i != HalfNumElts; ++i)
        ShufMask[i] = i + HalfNumElts;

      SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
      Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);

      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
      Results.push_back(Res);
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
    EVT VT = N->getValueType(0);
    SDValue Src = N->getOperand(0);
    EVT SrcVT = Src.getValueType();

    if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");

      // Try to create a 128 bit vector, but don't exceed a 32 bit element.
      unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
      MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
                                       VT.getVectorNumElements());
      SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);

      // Preserve what we know about the size of the original result. Except
      // when the result is v2i32 since we can't widen the assert.
      if (PromoteVT != MVT::v2i32)
        Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
                                                            : ISD::AssertSext,
                          dl, PromoteVT, Res,
                          DAG.getValueType(VT.getVectorElementType()));

      // Truncate back to the original width.
      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);

      // Now widen to 128 bits.
      unsigned NumConcats = 128 / VT.getSizeInBits();
      MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
                                      VT.getVectorNumElements() * NumConcats);
      SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
      ConcatOps[0] = Res;
      Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
      Results.push_back(Res);
      return;
    }

    if (VT == MVT::v2i32) {
      assert((IsSigned || Subtarget.hasAVX512()) &&
             "Can only handle signed conversion without AVX512");
      assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");
      if (Src.getValueType() == MVT::v2f64) {
        if (!IsSigned && !Subtarget.hasVLX()) {
          // With VLX we can emit a target specific FP_TO_UINT node below;
          // otherwise defer to the generic legalizer, which will widen
          // the input as well. This will be further widened during op
          // legalization to v8i32<-v8f64.
          return;
        }
        unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
        SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
        Results.push_back(Res);
        return;
      }

      // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
      // so early out here.
      return;
    }

    assert(!VT.isVector() && "Vectors should have been handled above!");

    if (Subtarget.hasDQI() && VT == MVT::i64 &&
        (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
      assert(!Subtarget.is64Bit() && "i64 should be legal");
      unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
      // Using a 256-bit input here to guarantee 128-bit input for f32 case.
      // TODO: Use 128-bit vectors for f64 case?
      // TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
      MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
      MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);

      SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
      SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
                                DAG.getConstantFP(0.0, dl, VecInVT), Src,
                                ZeroIdx);
      Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
      Results.push_back(Res);
      return;
    }

    if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
      Results.push_back(V);
    return;
  }
  case ISD::SINT_TO_FP: {
    assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
    SDValue Src = N->getOperand(0);
    if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
      return;
    Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
    return;
  }
  case ISD::UINT_TO_FP: {
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
    EVT VT = N->getValueType(0);
    if (VT != MVT::v2f32)
      return;
    SDValue Src = N->getOperand(0);
    EVT SrcVT = Src.getValueType();
    if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
      Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
      return;
    }
    if (SrcVT != MVT::v2i32)
      return;
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
    SDValue VBias =
        DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
                             DAG.getBitcast(MVT::v2i64, VBias));
    Or = DAG.getBitcast(MVT::v2f64, Or);
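    // At this point each lane holds, bit for bit, the double 2^52 + x
    // (0x4330000000000000 is 2^52), so subtracting VBias recovers x exactly
    // as an f64; VFPROUND then narrows the result to f32.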
    // TODO: Are there any fast-math-flags to propagate here?
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
    Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
    return;
  }
  case ISD::FP_ROUND: {
    if (!isTypeLegal(N->getOperand(0).getValueType()))
      return;
    SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
    Results.push_back(V);
    return;
  }
  case ISD::FP_EXTEND: {
    // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
    // No other ValueType for FP_EXTEND should reach this point.
    assert(N->getValueType(0) == MVT::v2f32 &&
           "Do not know how to legalize this Node");
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = N->getConstantOperandVal(1);
    switch (IntNo) {
    default: llvm_unreachable("Do not know how to custom type "
                              "legalize this intrinsic operation!");
    case Intrinsic::x86_rdtsc:
      return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdtscp:
      return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdpmc:
      expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
                                  Results);
      return;
    case Intrinsic::x86_xgetbv:
      expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
                                  Results);
      return;
    }
  }
  case ISD::READCYCLECOUNTER: {
    return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    EVT T = N->getValueType(0);
    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
    bool Regs64bit = T == MVT::i128;
    assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
           "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
    MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(0, dl, HalfT));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(1, dl, HalfT));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
                             Regs64bit ? X86::RAX : X86::EAX,
                             cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
                             Regs64bit ? X86::RDX : X86::EDX,
                             cpInH, cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(0, dl, HalfT));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(1, dl, HalfT));
    swapInH =
        DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
                         swapInH, cpInH.getValue(1));
    // If the current function needs the base pointer, RBX,
    // we shouldn't use cmpxchg directly.
    // Indeed the lowering of that instruction will clobber
    // that register and since RBX will be a reserved register
    // the register allocator will not make sure its value will
    // be properly saved and restored around this live-range.
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    SDValue Result;
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    Register BasePtr = TRI->getBaseRegister();
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
        (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
      // ISel prefers the LCMPXCHG64 variant.
      // If that assert breaks, that means it is not the case anymore,
      // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
      // not just EBX. This is a matter of accepting i64 input for that
      // pseudo, and restoring into the register of the right width
      // in expand pseudo. Everything else should just work.
      assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
             "Saving only half of the RBX");
      unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
                                  : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
      SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
                                           Regs64bit ? X86::RBX : X86::EBX,
                                           HalfT, swapInH.getValue(1));
      SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
                       swapInH,
                       /*Glue*/ RBXSave.getValue(2)};
      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    } else {
      unsigned Opcode =
          Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
      swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
                                 Regs64bit ? X86::RBX : X86::EBX, swapInL,
                                 swapInH.getValue(1));
      SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
                       swapInL.getValue(1)};
      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    }
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
                                        Regs64bit ? X86::RAX : X86::EAX,
                                        HalfT, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
                                        Regs64bit ? X86::RDX : X86::EDX,
                                        HalfT, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};

    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
                                        MVT::i32, cpOutH.getValue(2));
    SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
    Results.push_back(Success);
    Results.push_back(EFLAGS.getValue(1));
    return;
  }
  case ISD::ATOMIC_LOAD: {
    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
    bool NoImplicitFloatOps =
        DAG.getMachineFunction().getFunction().hasFnAttribute(
            Attribute::NoImplicitFloat);
    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
      auto *Node = cast<AtomicSDNode>(N);
      if (Subtarget.hasSSE2()) {
        // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
        // lower 64-bits.
        SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
        SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                             MVT::i64, Node->getMemOperand());
        SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
                                  DAG.getIntPtrConstant(0, dl));
        Results.push_back(Res);
        Results.push_back(Ld.getValue(1));
        return;
      }
      if (Subtarget.hasX87()) {
        // First load this into an 80-bit X87 register. This will put the whole
        // integer into the significand.
        // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
        SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
        SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
                                                 dl, Tys, Ops, MVT::i64,
                                                 Node->getMemOperand());
        SDValue Chain = Result.getValue(1);
        SDValue InFlag = Result.getValue(2);

        // Now store the X87 register to a stack temporary and convert to i64.
        // This store is not atomic and doesn't need to be.
        // FIXME: We don't need a stack temporary if the result of the load
        // is already being stored. We could just directly store there.
        SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
        int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
        MachinePointerInfo MPI =
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
        SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
        Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
                                        DAG.getVTList(MVT::Other), StoreOps,
                                        MVT::i64, MPI, 0 /*Align*/,
                                        MachineMemOperand::MOStore);

        // Finally load the value back from the stack temporary and return it.
        // This load is not atomic and doesn't need to be.
        // This load will be further type legalized.
        Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
        Results.push_back(Result);
        Results.push_back(Result.getValue(1));
        return;
      }
    }
    // TODO: Use MOVLPS when SSE1 is available?
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    break;
  }
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    break;
  case ISD::BITCAST: {
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
    EVT DstVT = N->getValueType(0);
    EVT SrcVT = N->getOperand(0).getValueType();

    // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target
    // we can split using the k-register rather than memory.
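    // E.g. (a sketch): the v64i1 mask splits into two v32i1 halves, each
    // bitcast to i32 (a KMOVD), and the two i32 pieces are paired into the
    // final i64.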
    if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
      assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
      Lo = DAG.getBitcast(MVT::i32, Lo);
      Hi = DAG.getBitcast(MVT::i32, Hi);
      SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    // Custom splitting for BWI types when AVX512F is available but BWI isn't.
    if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
        SrcVT.isVector() && isTypeLegal(SrcVT)) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
      MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
      Lo = DAG.getBitcast(CastVT, Lo);
      Hi = DAG.getBitcast(CastVT, Hi);
      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
      assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
             "Unexpected type action!");
      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
      SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, WideVT, N->getOperand(0));
      Results.push_back(Res);
      return;
    }

    return;
  }
  case ISD::MGATHER: {
    EVT VT = N->getValueType(0);
    if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
        (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
      auto *Gather = cast<MaskedGatherSDNode>(N);
      SDValue Index = Gather->getIndex();
      if (Index.getValueType() != MVT::v2i64)
        return;
      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
             "Unexpected type action!");
      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
      SDValue Mask = Gather->getMask();
      assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
      SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
                                     Gather->getPassThru(),
                                     DAG.getUNDEF(VT));
      if (!Subtarget.hasVLX()) {
        // We need to widen the mask, but the instruction will only use 2
        // of its elements. So we can use undef.
        Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
                           DAG.getUNDEF(MVT::v2i1));
        Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
      }
      SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
                        Gather->getBasePtr(), Index, Gather->getScale() };
      SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
          DAG.getVTList(WideVT, Mask.getValueType(), MVT::Other), Ops, dl,
          Gather->getMemoryVT(), Gather->getMemOperand());
      Results.push_back(Res);
      Results.push_back(Res.getValue(2));
      return;
    }
    return;
  }
  case ISD::LOAD: {
    // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
    // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
    // cast since type legalization will try to use an i64 load.
    MVT VT = N->getSimpleValueType(0);
    assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
           "Unexpected type action!");
    if (!ISD::isNON_EXTLoad(N))
      return;
    auto *Ld = cast<LoadSDNode>(N);
    if (Subtarget.hasSSE2()) {
      MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
      SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
                                Ld->getPointerInfo(), Ld->getAlignment(),
                                Ld->getMemOperand()->getFlags());
      SDValue Chain = Res.getValue(1);
      MVT VecVT = MVT::getVectorVT(LdVT, 2);
      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
      Res = DAG.getBitcast(WideVT, Res);
      Results.push_back(Res);
      Results.push_back(Chain);
      return;
    }
    assert(Subtarget.hasSSE1() && "Expected SSE");
    SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
    SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
    SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                          MVT::i64, Ld->getMemOperand());
    Results.push_back(Res);
    Results.push_back(Res.getValue(1));
    return;
  }
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((X86ISD::NodeType)Opcode) {
  case X86ISD::FIRST_NUMBER: break;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FANDN: return "X86ISD::FANDN";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FIST: return "X86ISD::FIST";
  case X86ISD::FP_TO_INT_IN_MEM: return "X86ISD::FP_TO_INT_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::BT: return "X86ISD::BT";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::CMPM: return "X86ISD::CMPM";
  case X86ISD::CMPM_SAE: return "X86ISD::CMPM_SAE";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCC: return "X86ISD::FSETCC";
  case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
  case X86ISD::FSETCCM_SAE: return "X86ISD::FSETCCM_SAE";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::IRET: return "X86ISD::IRET";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
  case X86ISD::MOVQ2DQ: return "X86ISD::MOVQ2DQ";
  case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
  case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
  case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
  case X86ISD::ANDNP: return "X86ISD::ANDNP";
  case X86ISD::BLENDI: return "X86ISD::BLENDI";
  case X86ISD::BLENDV: return "X86ISD::BLENDV";
  case X86ISD::HADD: return "X86ISD::HADD";
  case X86ISD::HSUB: return "X86ISD::HSUB";
  case X86ISD::FHADD: return "X86ISD::FHADD";
  case X86ISD::FHSUB: return "X86ISD::FHSUB";
  case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMAXS: return "X86ISD::FMAXS";
  case X86ISD::FMAX_SAE: return "X86ISD::FMAX_SAE";
  case X86ISD::FMAXS_SAE: return "X86ISD::FMAXS_SAE";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FMINS: return "X86ISD::FMINS";
  case X86ISD::FMIN_SAE: return "X86ISD::FMIN_SAE";
  case X86ISD::FMINS_SAE: return "X86ISD::FMINS_SAE";
  case X86ISD::FMAXC: return "X86ISD::FMAXC";
  case X86ISD::FMINC: return "X86ISD::FMINC";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
  case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_SJLJ_SETUP_DISPATCH:
    return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
  case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
    return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
  case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
    return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
  case X86ISD::LADD: return "X86ISD::LADD";
  case X86ISD::LSUB: return "X86ISD::LSUB";
  case X86ISD::LOR: return "X86ISD::LOR";
  case X86ISD::LXOR: return "X86ISD::LXOR";
  case X86ISD::LAND: return "X86ISD::LAND";
  case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
  case X86ISD::VEXTRACT_STORE: return "X86ISD::VEXTRACT_STORE";
  case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
  case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
  case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
  case X86ISD::VMTRUNC: return "X86ISD::VMTRUNC";
  case X86ISD::VMTRUNCS: return "X86ISD::VMTRUNCS";
  case X86ISD::VMTRUNCUS: return "X86ISD::VMTRUNCUS";
  case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
  case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
  case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
  case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
  case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
  case X86ISD::VFPEXT_SAE: return "X86ISD::VFPEXT_SAE";
  case X86ISD::VFPEXTS: return "X86ISD::VFPEXTS";
  case X86ISD::VFPEXTS_SAE: return "X86ISD::VFPEXTS_SAE";
  case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
  case X86ISD::VMFPROUND: return "X86ISD::VMFPROUND";
  case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
  case X86ISD::VFPROUNDS: return "X86ISD::VFPROUNDS";
  case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
  case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
  case X86ISD::VSHL: return "X86ISD::VSHL";
  case X86ISD::VSRL: return "X86ISD::VSRL";
  case X86ISD::VSRA: return "X86ISD::VSRA";
  case X86ISD::VSHLI: return "X86ISD::VSHLI";
  case X86ISD::VSRLI: return "X86ISD::VSRLI";
  case X86ISD::VSRAI: return "X86ISD::VSRAI";
  case X86ISD::VSHLV: return "X86ISD::VSHLV";
  case X86ISD::VSRLV: return "X86ISD::VSRLV";
  case X86ISD::VSRAV: return "X86ISD::VSRAV";
  case X86ISD::VROTLI: return "X86ISD::VROTLI";
  case X86ISD::VROTRI: return "X86ISD::VROTRI";
  case X86ISD::VPPERM: return "X86ISD::VPPERM";
  case X86ISD::CMPP: return "X86ISD::CMPP";
  case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
  case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
  case X86ISD::ADD: return "X86ISD::ADD";
  case X86ISD::SUB: return "X86ISD::SUB";
  case X86ISD::ADC: return "X86ISD::ADC";
  case X86ISD::SBB: return "X86ISD::SBB";
  case X86ISD::SMUL: return "X86ISD::SMUL";
  case X86ISD::UMUL: return "X86ISD::UMUL";
  case X86ISD::OR: return "X86ISD::OR";
  case X86ISD::XOR: return "X86ISD::XOR";
  case X86ISD::AND: return "X86ISD::AND";
  case X86ISD::BEXTR: return "X86ISD::BEXTR";
  case X86ISD::BZHI: return "X86ISD::BZHI";
  case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
  case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
  case X86ISD::PTEST: return "X86ISD::PTEST";
  case X86ISD::TESTP: return "X86ISD::TESTP";
  case X86ISD::KORTEST: return "X86ISD::KORTEST";
  case X86ISD::KTEST: return "X86ISD::KTEST";
  case X86ISD::KADD: return "X86ISD::KADD";
  case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
  case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
  case X86ISD::PACKSS: return "X86ISD::PACKSS";
  case X86ISD::PACKUS: return "X86ISD::PACKUS";
  case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
  case X86ISD::VALIGN: return "X86ISD::VALIGN";
  case X86ISD::VSHLD: return "X86ISD::VSHLD";
  case X86ISD::VSHRD: return "X86ISD::VSHRD";
  case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
  case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
  case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP: return "X86ISD::SHUFP";
  case X86ISD::SHUF128: return "X86ISD::SHUF128";
  case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
  case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
  case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSD: return "X86ISD::MOVSD";
  case X86ISD::MOVSS: return "X86ISD::MOVSS";
  case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
  case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
  case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
  case X86ISD::VBROADCAST_LOAD: return "X86ISD::VBROADCAST_LOAD";
  case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
  case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
  case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
  case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
  case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
  case X86ISD::VPERMV: return "X86ISD::VPERMV";
  case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
  case X86ISD::VPERMI: return "X86ISD::VPERMI";
  case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
  case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
  case X86ISD::VFIXUPIMM_SAE: return "X86ISD::VFIXUPIMM_SAE";
  case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
  case X86ISD::VFIXUPIMMS_SAE: return "X86ISD::VFIXUPIMMS_SAE";
  case X86ISD::VRANGE: return "X86ISD::VRANGE";
  case X86ISD::VRANGE_SAE: return "X86ISD::VRANGE_SAE";
  case X86ISD::VRANGES: return "X86ISD::VRANGES";
  case X86ISD::VRANGES_SAE: return "X86ISD::VRANGES_SAE";
  case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
  case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
  case X86ISD::PSADBW: return "X86ISD::PSADBW";
  case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
  case X86ISD::MFENCE: return "X86ISD::MFENCE";
  case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
  case X86ISD::SAHF: return "X86ISD::SAHF";
  case X86ISD::RDRAND: return "X86ISD::RDRAND";
  case X86ISD::RDSEED: return "X86ISD::RDSEED";
  case X86ISD::RDPKRU: return "X86ISD::RDPKRU";
  case X86ISD::WRPKRU: return "X86ISD::WRPKRU";
  case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
  case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
  case X86ISD::VPSHA: return "X86ISD::VPSHA";
  case X86ISD::VPSHL: return "X86ISD::VPSHL";
  case X86ISD::VPCOM: return "X86ISD::VPCOM";
  case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
  case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
  case X86ISD::FMSUB: return "X86ISD::FMSUB";
  case X86ISD::FNMADD: return "X86ISD::FNMADD";
  case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
  case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
  case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
  case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
  case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
  case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
  case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
  case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
  case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
  case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
  case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
  case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
  case X86ISD::VRNDSCALE_SAE: return "X86ISD::VRNDSCALE_SAE";
  case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
  case X86ISD::VRNDSCALES_SAE: return "X86ISD::VRNDSCALES_SAE";
  case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
  case X86ISD::VREDUCE_SAE: return "X86ISD::VREDUCE_SAE";
  case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
  case X86ISD::VREDUCES_SAE: return "X86ISD::VREDUCES_SAE";
  case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
  case X86ISD::VGETMANT_SAE: return "X86ISD::VGETMANT_SAE";
  case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
  case X86ISD::VGETMANTS_SAE: return "X86ISD::VGETMANTS_SAE";
  case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
  case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
  case X86ISD::XTEST: return "X86ISD::XTEST";
  case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
  case X86ISD::EXPAND: return "X86ISD::EXPAND";
  case X86ISD::SELECTS: return "X86ISD::SELECTS";
  case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
  case X86ISD::RCP14: return "X86ISD::RCP14";
  case X86ISD::RCP14S: return "X86ISD::RCP14S";
  case X86ISD::RCP28: return "X86ISD::RCP28";
  case X86ISD::RCP28_SAE: return "X86ISD::RCP28_SAE";
  case X86ISD::RCP28S: return "X86ISD::RCP28S";
  case X86ISD::RCP28S_SAE: return "X86ISD::RCP28S_SAE";
  case X86ISD::EXP2: return "X86ISD::EXP2";
  case X86ISD::EXP2_SAE: return "X86ISD::EXP2_SAE";
  case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
  case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
  case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
  case X86ISD::RSQRT28_SAE: return "X86ISD::RSQRT28_SAE";
  case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
  case X86ISD::RSQRT28S_SAE: return "X86ISD::RSQRT28S_SAE";
  case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
  case X86ISD::FADDS: return "X86ISD::FADDS";
  case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
  case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
  case X86ISD::FSUBS: return "X86ISD::FSUBS";
  case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
  case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
  case X86ISD::FMULS: return "X86ISD::FMULS";
  case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
  case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
  case X86ISD::FDIVS: return "X86ISD::FDIVS";
  case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
28823 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
28824 case X86ISD::FSQRTS: return "X86ISD::FSQRTS";
28825 case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
28826 case X86ISD::FGETEXP: return "X86ISD::FGETEXP";
28827 case X86ISD::FGETEXP_SAE: return "X86ISD::FGETEXP_SAE";
28828 case X86ISD::FGETEXPS: return "X86ISD::FGETEXPS";
28829 case X86ISD::FGETEXPS_SAE: return "X86ISD::FGETEXPS_SAE";
28830 case X86ISD::SCALEF: return "X86ISD::SCALEF";
28831 case X86ISD::SCALEF_RND: return "X86ISD::SCALEF_RND";
28832 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
28833 case X86ISD::SCALEFS_RND: return "X86ISD::SCALEFS_RND";
28834 case X86ISD::AVG: return "X86ISD::AVG";
28835 case X86ISD::MULHRS: return "X86ISD::MULHRS";
28836 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
28837 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
28838 case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
28839 case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
28840 case X86ISD::MCVTTP2SI: return "X86ISD::MCVTTP2SI";
28841 case X86ISD::MCVTTP2UI: return "X86ISD::MCVTTP2UI";
28842 case X86ISD::CVTTP2SI_SAE: return "X86ISD::CVTTP2SI_SAE";
28843 case X86ISD::CVTTP2UI_SAE: return "X86ISD::CVTTP2UI_SAE";
28844 case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI";
28845 case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI";
28846 case X86ISD::CVTTS2SI_SAE: return "X86ISD::CVTTS2SI_SAE";
28847 case X86ISD::CVTTS2UI_SAE: return "X86ISD::CVTTS2UI_SAE";
28848 case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
28849 case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
28850 case X86ISD::MCVTSI2P: return "X86ISD::MCVTSI2P";
28851 case X86ISD::MCVTUI2P: return "X86ISD::MCVTUI2P";
28852 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
28853 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
28854 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
28855 case X86ISD::SCALAR_SINT_TO_FP: return "X86ISD::SCALAR_SINT_TO_FP";
28856 case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
28857 case X86ISD::SCALAR_UINT_TO_FP: return "X86ISD::SCALAR_UINT_TO_FP";
28858 case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
28859 case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
28860 case X86ISD::MCVTPS2PH: return "X86ISD::MCVTPS2PH";
28861 case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
28862 case X86ISD::CVTPH2PS_SAE: return "X86ISD::CVTPH2PS_SAE";
28863 case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
28864 case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
28865 case X86ISD::MCVTP2SI: return "X86ISD::MCVTP2SI";
28866 case X86ISD::MCVTP2UI: return "X86ISD::MCVTP2UI";
28867 case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
28868 case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
28869 case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI";
28870 case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI";
28871 case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
28872 case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
28873 case X86ISD::CVTNE2PS2BF16: return "X86ISD::CVTNE2PS2BF16";
28874 case X86ISD::CVTNEPS2BF16: return "X86ISD::CVTNEPS2BF16";
28875 case X86ISD::MCVTNEPS2BF16: return "X86ISD::MCVTNEPS2BF16";
28876 case X86ISD::DPBF16PS: return "X86ISD::DPBF16PS";
28877 case X86ISD::LWPINS: return "X86ISD::LWPINS";
28878 case X86ISD::MGATHER: return "X86ISD::MGATHER";
28879 case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
28880 case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
28881 case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
28882 case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
28883 case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
28884 case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
28885 case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
28886 case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
28887 case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
28888 case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
28889 case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
28890 case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
28891 case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
  case X86ISD::ENQCMD: return "X86ISD::ENQCMD";
  case X86ISD::ENQCMDS: return "X86ISD::ENQCMDS";
  case X86ISD::VP2INTERSECT: return "X86ISD::VP2INTERSECT";
  }
  return nullptr;
}

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || isPositionIndependent()) &&
        Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default: // Other stuff never works.
    return false;
  }

  return true;
}
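// For illustration: the accepted scales mirror what a single SIB byte can
// encode. "Base + 4*Index + Disp" maps directly onto, e.g.:
//
//   movl 16(%rdi,%rcx,4), %eax   // scale 4: always encodable
//
// while a scale of 3 is only reachable by reusing the base-register slot,
// as in:
//
//   leaq (%rax,%rax,2), %rcx     // 3*%rax = %rax + 2*%rax
//
// which is why Scale == 3 is rejected above once AM.HasBaseReg is set.
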
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount
  // aren't particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
  if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
      (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
    return false;

  // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
  // shifts just as cheap as scalar ones.
  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
    return false;

  // AVX512BW has shifts such as vpsllvw.
  if (Subtarget.hasBWI() && Bits == 16)
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than
  // by a fully general vector.
  return true;
}
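// For illustration: with AVX2 a per-lane variable shift such as
//
//   vpsllvd %ymm1, %ymm0, %ymm0   // each i32 lane shifted by its own amount
//
// costs roughly the same as a uniform shift, so splatting a scalar amount
// into a vector buys nothing; without such instructions the scalar-amount
// forms (vpslld with an immediate, or with an amount in an XMM register) are
// the cheap ones, which is what returning true above reports.
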
bool X86TargetLowering::isBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // These are non-commutative binops.
  // TODO: Add more X86ISD opcodes once we have test coverage.
  case X86ISD::ANDNP:
  case X86ISD::PCMPGT:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case X86ISD::FANDN:
    return true;
  }

  return TargetLoweringBase::isBinOp(Opcode);
}
bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // TODO: Add more X86ISD opcodes once we have test coverage.
  case X86ISD::PCMPEQ:
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ:
  case X86ISD::FMAXC:
  case X86ISD::FMINC:
  case X86ISD::FAND:
  case X86ISD::FOR:
  case X86ISD::FXOR:
    return true;
  }

  return TargetLoweringBase::isCommutativeBinOp(Opcode);
}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}
bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}
bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}
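// For illustration: any 32-bit operation on x86-64 clears bits 63:32 of the
// destination register, so e.g.
//
//   movl (%rdi), %eax   // also zeros the upper half of %rax
//
// already performs the i32 -> i64 zero extension, and no separate
// movzx-style instruction is needed; that is why the queries above report
// the extension as free.
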
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}
bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
    return false;

  EVT SrcVT = ExtVal.getOperand(0).getValueType();

  // There is no extending load for vXi1.
  if (SrcVT.getScalarType() == MVT::i1)
    return false;

  return true;
}
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  if (!Subtarget.hasAnyFMA())
    return false;

  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}
bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
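// For illustration: the 16-bit form of an instruction carries an extra
// operand-size prefix byte, e.g.
//
//   01 d8      addl %ebx, %eax   // 2 bytes
//   66 01 d8   addw %bx, %ax     // 3 bytes, plus a partial register write
//
// so narrowing an i32 operation to i16 grows the encoding and risks
// partial-register stalls, hence the refusal above.
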
/// Targets can use this to indicate that they only support *some*
/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  if (!VT.isSimple())
    return false;

  // Not for i1 vectors
  if (VT.getSimpleVT().getScalarType() == MVT::i1)
    return false;

  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSimpleVT().getSizeInBits() == 64)
    return false;

  // We only care that the types being shuffled are legal. The lowering can
  // handle any possible shuffle mask that results.
  return isTypeLegal(VT.getSimpleVT());
}
bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
                                               EVT VT) const {
  // Don't convert an 'and' into a shuffle that we don't directly support.
  // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
  if (!Subtarget.hasAVX2())
    if (VT == MVT::v32i8 || VT == MVT::v16i16)
      return false;

  // Just delegate to the generic legality, clear masks aren't special.
  return isShuffleMaskLegal(Mask, VT);
}
bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
  // If the subtarget is using retpolines, we need to not generate jump tables.
  if (Subtarget.useRetpolineIndirectBranches())
    return false;

  // Otherwise, fall back on the generic logic.
  return TargetLowering::areJTsAllowed(Fn);
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
                                     const TargetInstrInfo *TII) {
  DebugLoc DL = MI.getDebugLoc();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin fallMBB
  //
  // mainMBB:
  //  s0 = -1
  //
  // fallMBB:
  //  eax = # XABORT_DEF
  //  s1 = eax
  //
  // sinkMBB:
  //  v = phi(s0/mainBB, s1/fallBB)

  MachineBasicBlock *thisMBB = MBB;
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, fallMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register fallDstReg = MRI.createVirtualRegister(RC);

  // thisMBB:
  //  xbegin fallMBB
  //  # fallthrough to mainMBB
  //  # abort path to fallMBB
  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(fallMBB);

  // mainMBB:
  //  mainDstReg := -1
  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
  BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  mainMBB->addSuccessor(sinkMBB);

  // fallMBB:
  //  ; pseudo instruction to model hardware's definition from XABORT
  //  EAX := XABORT_DEF
  //  fallDstReg := EAX
  BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
  BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
      .addReg(X86::EAX);
  fallMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
  BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(fallDstReg).addMBB(fallMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  static_assert(X86::AddrNumOperands == 5,
                "VAARG_64 assumes 5 address operands");

  Register DestReg = MI.getOperand(0).getReg();
  MachineOperand &Base = MI.getOperand(1);
  MachineOperand &Scale = MI.getOperand(2);
  MachineOperand &Index = MI.getOperand(3);
  MachineOperand &Disp = MI.getOperand(4);
  MachineOperand &Segment = MI.getOperand(5);
  unsigned ArgSize = MI.getOperand(6).getImm();
  unsigned ArgMode = MI.getOperand(7).getImm();
  unsigned Align = MI.getOperand(8).getImm();

  MachineFunction *MF = MBB->getParent();

  // Memory Reference
  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");

  MachineMemOperand *OldMMO = MI.memoperands().front();

  // Clone the MMO into two separate MMOs for loading and storing
  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);

  // Machine Information
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI.getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  /* Align ArgSize to a multiple of 8 */
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);
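  // For illustration: a 12-byte vararg rounds up to
  // ArgSizeA8 = (12 + 7) & ~7 = 16, so the overflow-area cursor always
  // advances in whole 8-byte slots as the ABI requires.
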
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //         |     .
    //         |        .
    //     offsetMBB   overflowMBB
    //         |        .
    //         |     .
    //        endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = ++MBB->getIterator();

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
        .addReg(OffsetReg)
        .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
        .addMBB(overflowMBB).addImm(X86::COND_AE);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, 16)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);

    // Zero-extend the offset
    Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
        .addImm(0)
        .addReg(OffsetReg)
        .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
        .addReg(OffsetReg64)
        .addReg(RegSaveReg);

    // Compute the offset for the next argument
    Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
        .addReg(OffsetReg)
        .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .addReg(NextOffsetReg)
        .setMemRefs(StoreOnlyMMO);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
        .addMBB(endMBB);
  }

  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .setMemRefs(LoadOnlyMMO);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
    Register TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
        .addReg(OverflowAddrReg)
        .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
        .addReg(TmpReg)
        .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
        .addReg(OverflowAddrReg);
  }
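  // For illustration: with Align = 16 and an overflow cursor at 0x1009, the
  // add/and pair above computes (0x1009 + 15) & ~15 = 0x1010, the next
  // 16-byte boundary; an already-aligned address is left unchanged.
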
  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
      .addReg(OverflowDestReg)
      .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .addReg(NextAddrReg)
      .setMemRefs(StoreOnlyMMO);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
        .addReg(OffsetDestReg).addMBB(offsetMBB)
        .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI.eraseFromParent();

  return endMBB;
}

MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them,
  // however this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, and it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.
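  //
  // For illustration: under the SysV AMD64 ABI the caller of a varargs
  // function reports the number of vector registers actually used in %al,
  // e.g.
  //
  //   movb  $2, %al     // two vector varargs were passed in %xmm0/%xmm1
  //   callq printf
  //
  // so testing %al for zero below is enough to skip the save block entirely
  // when no vector arguments were passed.
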
  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register CountReg = MI.getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI.getOperand(2).getImm();

  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
    MBB->addSuccessor(EndMBB);
  }

  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
  // that was just emitted, but clearly shouldn't be "saved".
  assert((MI.getNumOperands() <= 3 ||
          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
         "Expected last argument to be EFLAGS");
  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO = F->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
        MachineMemOperand::MOStore,
        /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
        .addFrameIndex(RegSaveFrameIndex)
        .addImm(/*Scale=*/1)
        .addReg(/*IndexReg=*/0)
        .addImm(/*Disp=*/Offset)
        .addReg(/*Segment=*/0)
        .addReg(MI.getOperand(i).getReg())
        .addMemOperand(MMO);
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}

// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
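//
// For illustration, in a sequence such as
//
//   CMP32rr %a, %b, implicit-def $eflags
//   %x = CMOV_GR32 %t1, %f1, 4, implicit $eflags
//   %y = CMOV_GR32 %t2, %f2, 4, implicit $eflags
//
// the first CMOV cannot carry a kill flag on $eflags because the second one
// still reads it; the scan below works out where the flags value really dies.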
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock* BB,
                                     const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of EFLAGS.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}

// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
static bool isCMOVPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CMOV_FR32:
  case X86::CMOV_FR32X:
  case X86::CMOV_FR64:
  case X86::CMOV_FR64X:
  case X86::CMOV_GR8:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
  case X86::CMOV_VR128:
  case X86::CMOV_VR128X:
  case X86::CMOV_VR256:
  case X86::CMOV_VR256X:
  case X86::CMOV_VR512:
  case X86::CMOV_VK2:
  case X86::CMOV_VK4:
  case X86::CMOV_VK8:
  case X86::CMOV_VK16:
  case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return true;

  default:
    return false;
  }
}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consequent CMOVs
// in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
// the last PHI function inserted.
static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
    MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
    MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  DebugLoc DL = MIItBegin->getDebugLoc();

  X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
  MachineInstrBuilder MIB;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(3).getImm() == OppCC)
      std::swap(Op1Reg, Op2Reg);

    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
      Op1Reg = RegRewriteTable[Op1Reg].first;

    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
      Op2Reg = RegRewriteTable[Op2Reg].second;

    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(TrueMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  return MIB;
}

// Lower cascaded selects in form of (SecondCmov (FirstCMOV F, T, cc1), T, cc2).
MachineBasicBlock *
X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
                                             MachineInstr &SecondCascadedCMOV,
                                             MachineBasicBlock *ThisMBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = FirstCMOV.getDebugLoc();

  // We lower cascaded CMOVs such as
  //
  //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
  //
  // to two successive branches.
  //
  // Without this, we would add a PHI between the two jumps, which ends up
  // creating a few copies all around. For instance, for
  //
  //   (sitofp (zext (fcmp une)))
  //
  // we would generate:
  //
  //   ucomiss %xmm1, %xmm0
  //   movss <1.0f>, %xmm0
  //   movaps %xmm0, %xmm1
  //   jne .LBB5_2
  //   xorps %xmm1, %xmm1
  // .LBB5_2:
  //   jp .LBB5_4
  //   movaps %xmm1, %xmm0
  // .LBB5_4:
  //   retq
  //
  // because this custom-inserter would have generated:
  //
  //   A
  //   | \
  //   |  B
  //   | /
  //   C
  //   | \
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // B: empty
  // C: Z = PHI [X, A], [Y, B]
  // D: empty
  // E: PHI [X, C], [Z, D]
  //
  // If we lower both CMOVs in a single step, we can instead generate:
  //
  //   A
  //   | \
  //   |  C
  //   | /|
  //   |/ |
  //   |  |
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // D: empty
  // E: PHI [X, A], [X, C], [Y, D]
  //
  // Which, in our sitofp/fcmp example, gives us something like:
  //
  //   ucomiss %xmm1, %xmm0
  //   movss <1.0f>, %xmm0
  //   jne .LBB5_4
  //   jp .LBB5_4
  //   xorps %xmm0, %xmm0
  // .LBB5_4:
  //   retq
  //
  // We lower cascaded CMOV into two successive branches to the same block.
  // EFLAGS is used by both, so mark it as live in the second.
  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FirstInsertedMBB);
  F->insert(It, SecondInsertedMBB);
  F->insert(It, SinkMBB);

  // For a cascaded CMOV, we lower it to two successive branches to
  // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
  // the FirstInsertedMBB.
  FirstInsertedMBB->addLiveIn(X86::EFLAGS);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
    SecondInsertedMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(FirstCMOV)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FirstInsertedMBB);
  // The true block target of the first branch is always SinkMBB.
  ThisMBB->addSuccessor(SinkMBB);
  // Fallthrough block for FirstInsertedMBB.
  FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
  // The true block for the branch of FirstInsertedMBB.
  FirstInsertedMBB->addSuccessor(SinkMBB);
  // This is fallthrough.
  SecondInsertedMBB->addSuccessor(SinkMBB);

  // Create the conditional branch instructions.
  X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);

  X86::CondCode SecondCC =
      X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
  BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1))
      .addMBB(SinkMBB)
      .addImm(SecondCC);

  // SinkMBB:
  //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
  Register DestReg = FirstCMOV.getOperand(0).getReg();
  Register Op1Reg = FirstCMOV.getOperand(1).getReg();
  Register Op2Reg = FirstCMOV.getOperand(2).getReg();
  MachineInstrBuilder MIB =
      BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
          .addReg(Op1Reg)
          .addMBB(SecondInsertedMBB)
          .addReg(Op2Reg)
          .addMBB(ThisMBB);

  // SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
  // (the True operand of the SELECT_CC/CMOV nodes).
  MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
  // Copy the PHI result to the register defined by the second CMOV.
  BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
          TII->get(TargetOpcode::COPY),
          SecondCascadedCMOV.getOperand(0).getReg())
      .addReg(FirstCMOV.getOperand(0).getReg());

  // Now remove the CMOVs.
  FirstCMOV.eraseFromParent();
  SecondCascadedCMOV.eraseFromParent();

  return SinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
                                     MachineBasicBlock *ThisMBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between and a branch opcode to use.

  //  ThisMBB:
  //   cmpTY ccX, r1, r2
  //   bCC SinkMBB
  //   fallthrough --> FalseMBB
  //
  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
  // as described above, by inserting a BB, and then making a PHI at the join
  // point to select the true and false operands of the CMOV in the PHI.
  //
  // The code also handles two different cases of multiple CMOV opcodes
  // in a row.
  //
  // Case 1:
  // In this case, there are multiple CMOVs in a row, all which are based on
  // the same condition setting (or the exact opposite condition setting).
  // In this case we can lower all the CMOVs using a single inserted BB, and
  // then make a number of PHIs at the join point to model the CMOVs. The only
  // trickiness here, is that in a case like:
  //
  //   t2 = CMOV cond1 t1, f1
  //   t3 = CMOV cond1 t2, f2
  //
  // when rewriting this into PHIs, we have to perform some renaming on the
  // temps since you cannot have a PHI operand refer to a PHI result earlier
  // in the same block. The "simple" but wrong lowering would be:
  //
  //   t2 = PHI t1(BB1), f1(BB2)
  //   t3 = PHI t2(BB1), f2(BB2)
  //
  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
  // renaming is to note that on the path through BB1, t2 is really just a
  // copy of t1, and do that renaming, properly generating:
  //
  //   t2 = PHI t1(BB1), f1(BB2)
  //   t3 = PHI t1(BB1), f2(BB2)
  //
  // Case 2:
  // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
  // function - EmitLoweredCascadedSelect.

  X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  MachineInstr *LastCMOV = &MI;
  MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);

  // Check for case 1, where there are multiple CMOVs with the same condition
  // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
  // number of jumps the most.

  if (isCMOVPseudo(MI)) {
    // See if we have a string of CMOVS with the same condition. Skip over
    // intervening debug insts.
    while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
           (NextMIIt->getOperand(3).getImm() == CC ||
            NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      ++NextMIIt;
      NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
    }
  }

  // This checks for case 2, but only do this if we didn't already find
  // case 1, as indicated by LastCMOV == MI.
  if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
      NextMIIt->getOpcode() == MI.getOpcode() &&
      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
      NextMIIt->getOperand(1).isKill()) {
    return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
  }

  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  if (!LastCMOV->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer any debug instructions inside the CMOV sequence to the sunk block.
  auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
  auto DbgIt = MachineBasicBlock::iterator(MI);
  while (DbgIt != DbgEnd) {
    auto Next = std::next(DbgIt);
    if (DbgIt->isDebugInstr())
      SinkMBB->push_back(DbgIt->removeFromParent());
    DbgIt = Next;
  }

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->end(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FalseMBB);
  // The true block target of the first (or only) branch is always SinkMBB.
  ThisMBB->addSuccessor(SinkMBB);
  // Fallthrough block for FalseMBB.
  FalseMBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);

  //  SinkMBB:
  //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
  //  ...
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);

  // Now remove the CMOV(s).
  ThisMBB->erase(MIItBegin, MIItEnd);

  return SinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(MF->shouldSplitStack());

  const bool Is64Bit = Subtarget.is64Bit();
  const bool IsLP64 = Subtarget.isTarget64BitLP64();

  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;

  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
      getRegClassFor(getPointerTy(MF->getDataLayout()));

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI.getOperand(1).getReg(),
           physSPReg =
               IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = ++BB->getIterator();

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);

  continueMBB->splice(continueMBB->begin(), BB,
                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
    .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
    .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
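  // For illustration, on an LP64 target the sequence above is roughly
  //
  //   movq %rsp, %tmp
  //   subq %size, %tmp        // candidate new stack pointer
  //   cmpq %tmp, %fs:0x70     // TLS slot holding this stacklet's limit
  //   jg   mallocMBB          // limit above candidate SP: not enough room
  //
  // i.e. the split-stack limit stored in TLS is compared against the
  // would-be stack pointer, matching the TlsReg/TlsOffset chosen earlier.
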
  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
      Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EDI, RegState::Implicit)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(16);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(IsLP64 ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI.getOperand(0).getReg())
    .addReg(mallocPtrVReg)
    .addMBB(mallocMBB)
    .addReg(bumpSPPtrVReg)
    .addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI.eraseFromParent();

  // And we're done.
  return continueMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
  DebugLoc DL = MI.getDebugLoc();

  assert(!isAsynchronousEHPersonality(
             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
         "SEH does not use catchret!");

  // Only 32-bit EH needs to worry about manually restoring stack pointers.
  if (!Subtarget.is32Bit())
    return BB;

  // C++ EH creates a new target block to hold the restore code, and wires up
  // the new block to the return destination with a normal JMP_4.
  MachineBasicBlock *RestoreMBB =
      MF->CreateMachineBasicBlock(BB->getBasicBlock());
  assert(BB->succ_size() == 1);
  MF->insert(std::next(BB->getIterator()), RestoreMBB);
  RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(RestoreMBB);
  MI.getOperand(0).setMBB(RestoreMBB);

  auto RestoreMBBI = RestoreMBB->begin();
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const Constant *PerFn = MF->getFunction().getPersonalityFn();
  bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
  // Only 32-bit SEH requires special handling for catchpad.
  if (IsSEH && Subtarget.is32Bit()) {
    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
  }
  MI.eraseFromParent();
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {
  // So, here we replace TLSADDR with the sequence:
  // adjust_stackdown -> TLSADDR -> adjust_stackup.
  // We need this because TLSADDR is lowered into calls
  // inside MC, therefore without the two markers shrink-wrapping
  // may push the prologue/epilogue past them.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction &MF = *BB->getParent();

  // Emit CALLSEQ_START right before the instruction.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  MachineInstrBuilder CallseqStart =
      BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
  BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);

  // Emit CALLSEQ_END right after the instruction.
  // We don't call erase from parent because we want to keep the
  // original instruction around.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MachineInstrBuilder CallseqEnd =
      BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
  BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);

  return BB;
}

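// For illustration: after this hook runs the block contains, schematically,
//
//   ADJCALLSTACKDOWN64 0, 0, 0
//   TLSADDR ...               // becomes a call to the TLS resolver in MC
//   ADJCALLSTACKUP64 0, 0
//
// and later passes treat the bracketed region as a call site, which keeps
// shrink-wrapping from sinking the prologue past it.
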
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy. We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call. The return value will then
  // be in the normal return register.
  MachineFunction *F = BB->getParent();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI.getOperand(3).isGlobal() && "This should be a global");

  // Get a register mask for the lowered call.
  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
  // proper register mask.
  const uint32_t *RegMask =
      Subtarget.is64Bit() ?
      Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
      Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
  if (Subtarget.is64Bit()) {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
            .addReg(X86::RIP)
            .addImm(1)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else if (!isPositionIndependent()) {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
            .addReg(0)
            .addImm(1)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
            .addReg(TII->getGlobalBaseReg(F))
            .addImm(1)
            .addReg(0)
            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                              MI.getOperand(3).getTargetFlags())
            .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
  switch (RPOpc) {
  case X86::RETPOLINE_CALL32:
    return X86::CALLpcrel32;
  case X86::RETPOLINE_CALL64:
    return X86::CALL64pcrel32;
  case X86::RETPOLINE_TCRETURN32:
    return X86::TCRETURNdi;
  case X86::RETPOLINE_TCRETURN64:
    return X86::TCRETURNdi64;
  }
  llvm_unreachable("not retpoline opcode");
}

static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
                                      unsigned Reg) {
  if (Subtarget.useRetpolineExternalThunk()) {
    // When using an external thunk for retpolines, we pick names that match
    // the names GCC happens to use as well. This helps simplify the
    // implementation of the thunks for kernels where they have no easy
    // ability to create aliases and are doing non-trivial configuration of
    // the thunk's body. For example, the Linux kernel will do boot-time hot
    // patching of the thunk bodies and cannot easily export aliases of these
    // to loaded modules.
    //
    // Note that at any point in the future, we may need to change the
    // semantics of how we implement retpolines and at that time will likely
    // change the name of the called thunk. Essentially, there is no hard
    // guarantee that LLVM will generate calls to specific thunks, we merely
    // make a best-effort attempt to help out kernels and other systems where
    // duplicating the thunks is costly.
    switch (Reg) {
    case X86::EAX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_eax";
    case X86::ECX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_ecx";
    case X86::EDX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edx";
    case X86::EDI:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edi";
    case X86::R11:
      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
      return "__x86_indirect_thunk_r11";
    }
    llvm_unreachable("unexpected reg for retpoline");
  }

  // When targeting an internal COMDAT thunk use an LLVM-specific name.
  switch (Reg) {
  case X86::EAX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_eax";
  case X86::ECX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_ecx";
  case X86::EDX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_edx";
  case X86::EDI:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_edi";
  case X86::R11:
    assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
    return "__llvm_retpoline_r11";
  }
  llvm_unreachable("unexpected reg for retpoline");
}

30357 MachineBasicBlock *
30358 X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
30359 MachineBasicBlock *BB) const {
30360 // Copy the virtual register into the R11 physical register and
30361 // call the retpoline thunk.
30362 DebugLoc DL = MI.getDebugLoc();
30363 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30364 Register CalleeVReg = MI.getOperand(0).getReg();
30365 unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
30367 // Find an available scratch register to hold the callee. On 64-bit, we can
30368 // just use R11, but we scan for uses anyway to ensure we don't generate
30369 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
30370 // already a register use operand to the call to hold the callee. If none
30371 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
30372 // register and ESI is the base pointer to realigned stack frames with VLAs.
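  //
  // After the rewrite below, the pseudo becomes, e.g. on 64-bit targets:
  //   $r11 = COPY %callee_vreg
  //   CALL64pcrel32 @__llvm_retpoline_r11, implicit killed $r11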
  SmallVector<unsigned, 3> AvailableRegs;
  if (Subtarget.is64Bit())
    AvailableRegs.push_back(X86::R11);
  else
    AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});

  // Zero out any registers that are already used.
  for (const auto &MO : MI.operands()) {
    if (MO.isReg() && MO.isUse())
      for (unsigned &Reg : AvailableRegs)
        if (Reg == MO.getReg())
          Reg = 0;
  }

  // Choose the first remaining non-zero available register.
  unsigned AvailableReg = 0;
  for (unsigned MaybeReg : AvailableRegs) {
    if (MaybeReg) {
      AvailableReg = MaybeReg;
      break;
    }
  }
  if (!AvailableReg)
    report_fatal_error("calling convention incompatible with retpoline, no "
                       "available registers");
30399 const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
30401 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
30402 .addReg(CalleeVReg);
30403 MI.getOperand(0).ChangeToES(Symbol);
30404 MI.setDesc(TII->get(Opc));
30405 MachineInstrBuilder(*BB->getParent(), &MI)
      .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
  return BB;
}
/// SetJmp implies future control flow change upon calling the corresponding
/// function.
30412 /// Instead of using the 'return' instruction, the long jump fixes the stack and
30413 /// performs an indirect branch. To do so it uses the registers that were stored
30414 /// in the jump buffer (when calling SetJmp).
30415 /// In case the shadow stack is enabled we need to fix it as well, because some
30416 /// return addresses will be skipped.
30417 /// The function will save the SSP for future fixing in the function
30418 /// emitLongJmpShadowStackFix.
30419 /// \sa emitLongJmpShadowStackFix
30420 /// \param [in] MI The temporary Machine Instruction for the builtin.
30421 /// \param [in] MBB The Machine Basic Block that will be modified.
30422 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
30423 MachineBasicBlock *MBB) const {
30424 DebugLoc DL = MI.getDebugLoc();
30425 MachineFunction *MF = MBB->getParent();
30426 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30427 MachineRegisterInfo &MRI = MF->getRegInfo();
30428 MachineInstrBuilder MIB;
30430 // Memory Reference.
30431 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30432 MI.memoperands_end());
30434 // Initialize a register with zero.
30435 MVT PVT = getPointerTy(MF->getDataLayout());
30436 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30437 Register ZReg = MRI.createVirtualRegister(PtrRC);
30438 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);
30444 // Read the current SSP Register value to the zeroed register.
30445 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30446 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30447 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
30449 // Write the SSP register value to offset 3 in input memory buffer.
30450 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30451 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
30452 const int64_t SSPOffset = 3 * PVT.getStoreSize();
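  // This lowering lays the buffer out in pointer-sized slots: slot 0 holds
  // the frame pointer, slot 1 the resume address, and slot 2 the stack
  // pointer, so the SSP lands in slot 3 (e.g. offset 24 on 64-bit targets).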
30453 const unsigned MemOpndSlot = 1;
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
30460 MIB.addReg(SSPCopyReg);
  MIB.setMemRefs(MMOs);
}
30464 MachineBasicBlock *
30465 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
30466 MachineBasicBlock *MBB) const {
30467 DebugLoc DL = MI.getDebugLoc();
30468 MachineFunction *MF = MBB->getParent();
30469 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30470 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30471 MachineRegisterInfo &MRI = MF->getRegInfo();
30473 const BasicBlock *BB = MBB->getBasicBlock();
30474 MachineFunction::iterator I = ++MBB->getIterator();
30476 // Memory Reference
30477 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30478 MI.memoperands_end());
  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI.getOperand(CurOp++).getReg();
30486 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30487 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
30489 Register mainDstReg = MRI.createVirtualRegister(RC);
30490 Register restoreDstReg = MRI.createVirtualRegister(RC);
30492 MemOpndSlot = CurOp;
30494 MVT PVT = getPointerTy(MF->getDataLayout());
30495 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30496 "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1
30514 MachineBasicBlock *thisMBB = MBB;
30515 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30516 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30517 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
30518 MF->insert(I, mainMBB);
30519 MF->insert(I, sinkMBB);
30520 MF->push_back(restoreMBB);
30521 restoreMBB->setHasAddressTaken();
30523 MachineInstrBuilder MIB;
30525 // Transfer the remainder of BB and its successor edges to sinkMBB.
30526 sinkMBB->splice(sinkMBB->begin(), MBB,
30527 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30528 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30531 unsigned PtrStoreOpc = 0;
30532 unsigned LabelReg = 0;
30533 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30534 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30535 !isPositionIndependent();
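  // Under the small, non-PIC code model the block address fits in a 32-bit
  // immediate, so it can be stored to the buffer with a MOVmi directly;
  // otherwise it is first materialized into a register with an LEA below
  // (RIP-relative, or via the PIC base register on 32-bit).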
30537 // Prepare IP either in reg or imm.
30538 if (!UseImmLabel) {
30539 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30540 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30541 LabelReg = MRI.createVirtualRegister(PtrRC);
    if (Subtarget.is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
                .addReg(X86::RIP)
                .addImm(1)
                .addReg(0)
                .addMBB(restoreMBB)
                .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
                .addReg(XII->getGlobalBaseReg(MF))
                .addImm(1)
                .addReg(0)
                .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
                .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30561 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOs);
  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    emitSetJmpShadowStackFix(MI, thisMBB);
  }

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
            .addMBB(restoreMBB);
30582 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30583 MIB.addRegMask(RegInfo->getNoPreservedMask());
30584 thisMBB->addSuccessor(mainMBB);
30585 thisMBB->addSuccessor(restoreMBB);
30589 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
30590 mainMBB->addSuccessor(sinkMBB);
30593 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
30594 TII->get(X86::PHI), DstReg)
30595 .addReg(mainDstReg).addMBB(mainMBB)
30596 .addReg(restoreDstReg).addMBB(restoreMBB);
30599 if (RegInfo->hasBasePointer(*MF)) {
30600 const bool Uses64BitFramePtr =
30601 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30602 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
30603 X86FI->setRestoreBasePointer(MF);
30604 Register FramePtr = RegInfo->getFrameRegister(*MF);
30605 Register BasePtr = RegInfo->getBaseRegister();
30606 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
30607 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
30608 FramePtr, true, X86FI->getRestoreBasePointerOffset())
        .setMIFlag(MachineInstr::FrameSetup);
  }
30611 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
30612 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30613 restoreMBB->addSuccessor(sinkMBB);
  MI.eraseFromParent();
  return sinkMBB;
}
30619 /// Fix the shadow stack using the previously saved SSP pointer.
30620 /// \sa emitSetJmpShadowStackFix
30621 /// \param [in] MI The temporary Machine Instruction for the builtin.
30622 /// \param [in] MBB The Machine Basic Block that will be modified.
30623 /// \return The sink MBB that will perform the future indirect branch.
30624 MachineBasicBlock *
30625 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
30626 MachineBasicBlock *MBB) const {
30627 DebugLoc DL = MI.getDebugLoc();
30628 MachineFunction *MF = MBB->getParent();
30629 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30630 MachineRegisterInfo &MRI = MF->getRegInfo();
30632 // Memory Reference
30633 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30634 MI.memoperands_end());
30636 MVT PVT = getPointerTy(MF->getDataLayout());
30637 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  // checkSspMBB:
  //         xor vreg1, vreg1
  //         rdssp vreg1
  //         test vreg1, vreg1
  //         je sinkMBB   # Jump if Shadow Stack is not supported
  // fallMBB:
  //         mov buf+24/12(%rip), vreg2
  //         sub vreg1, vreg2
  //         jbe sinkMBB  # No need to fix the Shadow Stack
  // fixShadowMBB:
  //         shr 3/2, vreg2
  //         incssp vreg2  # fix the SSP according to the lower 8 bits
  //         shr 8, vreg2
  //         je sinkMBB
  // fixShadowLoopPrepareMBB:
  //         shl vreg2
  //         mov 128, vreg3
  // fixShadowLoopMBB:
  //         incssp vreg3
  //         dec vreg2
  //         jne fixShadowLoopMBB # Iterate until you finish fixing
  //                              # the Shadow Stack
  // sinkMBB:
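  //
  // Worked example (64-bit, hex): a delta of 0x12340 bytes is 0x2468 eight-
  // byte slots after the shr by 3; incssp consumes the low 8 bits (0x68),
  // shr 8 leaves 0x24 chunks of 256 slots, and the shl doubles that to 0x48
  // loop iterations of incssp 128, since 0x68 + 0x48 * 0x80 = 0x2468.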
30663 MachineFunction::iterator I = ++MBB->getIterator();
30664 const BasicBlock *BB = MBB->getBasicBlock();
30666 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
30667 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30668 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
30669 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
30670 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
30671 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30672 MF->insert(I, checkSspMBB);
30673 MF->insert(I, fallMBB);
30674 MF->insert(I, fixShadowMBB);
30675 MF->insert(I, fixShadowLoopPrepareMBB);
30676 MF->insert(I, fixShadowLoopMBB);
30677 MF->insert(I, sinkMBB);
30679 // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
                  MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30684 MBB->addSuccessor(checkSspMBB);
30686 // Initialize a register with zero.
30687 Register ZReg = MRI.createVirtualRegister(PtrRC);
30688 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);
30694 // Read the current SSP Register value to the zeroed register.
30695 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30696 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30697 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
  // Check whether the result of the SSP register is zero and jump directly
  // to the sink.
30701 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
30702 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
30703 .addReg(SSPCopyReg)
30704 .addReg(SSPCopyReg);
30705 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30706 checkSspMBB->addSuccessor(sinkMBB);
30707 checkSspMBB->addSuccessor(fallMBB);
30709 // Reload the previously saved SSP register value.
30710 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
30711 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30712 const int64_t SPPOffset = 3 * PVT.getStoreSize();
30713 MachineInstrBuilder MIB =
30714 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
30715 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30716 const MachineOperand &MO = MI.getOperand(i);
30717 if (i == X86::AddrDisp)
30718 MIB.addDisp(MO, SPPOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);
30727 // Subtract the current SSP from the previous SSP.
30728 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
30729 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
30730 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
30731 .addReg(PrevSSPReg)
30732 .addReg(SSPCopyReg);
30734 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
30735 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
30736 fallMBB->addSuccessor(sinkMBB);
30737 fallMBB->addSuccessor(fixShadowMBB);
30739 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
30740 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
30741 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
  Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
      .addReg(SspSubReg)
      .addImm(Offset);
30747 // Increase SSP when looking only on the lower 8 bits of the delta.
30748 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
30749 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
30751 // Reset the lower 8 bits.
30752 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
      .addReg(SspFirstShrReg)
      .addImm(8);
30757 // Jump if the result of the shift is zero.
30758 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30759 fixShadowMBB->addSuccessor(sinkMBB);
30760 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
30762 // Do a single shift left.
30763 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
30764 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
30765 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
30766 .addReg(SspSecondShrReg);
30768 // Save the value 128 to a register (will be used next with incssp).
30769 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
30770 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
      .addImm(128);
30773 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
30775 // Since incssp only looks at the lower 8 bits, we might need to do several
30776 // iterations of incssp until we finish fixing the shadow stack.
30777 Register DecReg = MRI.createVirtualRegister(PtrRC);
30778 Register CounterReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
      .addReg(SspAfterShlReg)
      .addMBB(fixShadowLoopPrepareMBB)
      .addReg(DecReg)
      .addMBB(fixShadowLoopMBB);
30785 // Every iteration we increase the SSP by 128.
30786 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
30788 // Every iteration we decrement the counter by 1.
30789 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
30790 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
30792 // Jump if the counter is not zero yet.
30793 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
30794 fixShadowLoopMBB->addSuccessor(sinkMBB);
  fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);

  return sinkMBB;
}
30800 MachineBasicBlock *
30801 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
30802 MachineBasicBlock *MBB) const {
30803 DebugLoc DL = MI.getDebugLoc();
30804 MachineFunction *MF = MBB->getParent();
30805 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30806 MachineRegisterInfo &MRI = MF->getRegInfo();
30808 // Memory Reference
30809 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30810 MI.memoperands_end());
30812 MVT PVT = getPointerTy(MF->getDataLayout());
30813 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30814 "Invalid Pointer Size!");
30816 const TargetRegisterClass *RC =
30817 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30818 Register Tmp = MRI.createVirtualRegister(RC);
30819 // Since FP is only updated here but NOT referenced, it's treated as GPR.
30820 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30821 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
30822 Register SP = RegInfo->getStackRegister();
30824 MachineInstrBuilder MIB;
30826 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30827 const int64_t SPOffset = 2 * PVT.getStoreSize();
30829 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30830 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
30832 MachineBasicBlock *thisMBB = MBB;
  // When CET and shadow stack are enabled, we need to fix the Shadow Stack.
30835 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
  }

  // Reload FP.
30840 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
30841 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30842 const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) // Don't add the whole operand, we don't want to
                    // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload IP.
30852 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
30853 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30854 const MachineOperand &MO = MI.getOperand(i);
30855 if (i == X86::AddrDisp)
30856 MIB.addDisp(MO, LabelOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload SP.
30866 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
30867 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(i), SPOffset);
    else
      MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
                                 // the last instruction of the expansion.
  }
  MIB.setMemRefs(MMOs);

  // Jump.
30877 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
  MI.eraseFromParent();
  return thisMBB;
}
30883 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
30884 MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
30887 DebugLoc DL = MI.getDebugLoc();
30888 MachineFunction *MF = MBB->getParent();
30889 MachineRegisterInfo *MRI = &MF->getRegInfo();
30890 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30892 MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  unsigned Op = 0;
  unsigned VR = 0;

  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();

  if (UseImmLabel) {
    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  } else {
30904 const TargetRegisterClass *TRC =
30905 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30906 VR = MRI->createVirtualRegister(TRC);
30907 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
    if (Subtarget.is64Bit())
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
          .addReg(X86::RIP)
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB)
          .addReg(0);
    else
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
          .addReg(0) /* TII->getGlobalBaseReg(MF) */
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
          .addReg(0);
  }
30925 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
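  // The constants 56 and 36 below appear to be the offset of jbuf[1] (the
  // resume-address slot) within the SjLj function context that SjLjEHPrepare
  // builds, on 64- and 32-bit targets respectively.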
30926 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
  if (UseImmLabel)
    MIB.addMBB(DispatchBB);
  else
    MIB.addReg(VR);
}
30933 MachineBasicBlock *
30934 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
30935 MachineBasicBlock *BB) const {
30936 DebugLoc DL = MI.getDebugLoc();
30937 MachineFunction *MF = BB->getParent();
30938 MachineRegisterInfo *MRI = &MF->getRegInfo();
30939 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30940 int FI = MF->getFrameInfo().getFunctionContextIndex();
30942 // Get a mapping of the call site numbers to all of the landing pads they're
30943 // associated with.
30944 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
30945 unsigned MaxCSNum = 0;
30946 for (auto &MBB : *MF) {
    if (!MBB.isEHPad())
      continue;

    MCSymbol *Sym = nullptr;
    for (const auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;

      assert(MI.isEHLabel() && "expected EH_LABEL");
      Sym = MI.getOperand(0).getMCSymbol();
      break;
    }

    if (!MF->hasCallSiteLandingPad(Sym))
      continue;
30963 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
30964 CallSiteNumToLPad[CSI].push_back(&MBB);
      MaxCSNum = std::max(MaxCSNum, CSI);
    }
  }
30969 // Get an ordered list of the machine basic blocks for the jump table.
30970 std::vector<MachineBasicBlock *> LPadList;
30971 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
30972 LPadList.reserve(CallSiteNumToLPad.size());
30974 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
30975 for (auto &LP : CallSiteNumToLPad[CSI]) {
30976 LPadList.push_back(LP);
      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
    }
  }
30981 assert(!LPadList.empty() &&
30982 "No landing pad destinations for the dispatch jump table!");
30984 // Create the MBBs for the dispatch code.
30986 // Shove the dispatch's address into the return slot in the function context.
30987 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
30988 DispatchBB->setIsEHPad(true);
30990 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
30991 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
30992 DispatchBB->addSuccessor(TrapBB);
30994 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
30995 DispatchBB->addSuccessor(DispContBB);
30998 MF->push_back(DispatchBB);
30999 MF->push_back(DispContBB);
31000 MF->push_back(TrapBB);
  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
31006 // Create the jump table and associated information
31007 unsigned JTE = getJumpTableEncoding();
31008 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
31009 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
31011 const X86RegisterInfo &RI = TII->getRegisterInfo();
31012 // Add a register mask with no preserved registers. This results in all
31013 // registers being marked as clobbered.
31014 if (RI.hasBasePointer(*MF)) {
31015 const bool FPIs64Bit =
31016 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
31017 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
31018 MFI->setRestoreBasePointer(MF);
31020 Register FP = RI.getFrameRegister(*MF);
31021 Register BP = RI.getBaseRegister();
31022 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
31023 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
31024 MFI->getRestoreBasePointerOffset())
        .addRegMask(RI.getNoPreservedMask());
  } else {
    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
        .addRegMask(RI.getNoPreservedMask());
  }
31031 // IReg is used as an index in a memory operand and therefore can't be SP
31032 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
31033 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
31034 Subtarget.is64Bit() ? 8 : 4);
  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
      .addReg(IReg)
      .addImm(LPadList.size());
31038 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
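  // The unsigned compare above routes any out-of-range call-site index to
  // the trap block; e.g. with 3 landing pads only indices 0-2 fall through
  // to the indirect jump through the jump table.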
31040 if (Subtarget.is64Bit()) {
31041 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
31042 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
31044 // leaq .LJTI0_0(%rip), BReg
    BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
        .addReg(X86::RIP)
        .addImm(1)
        .addReg(0)
        .addJumpTableIndex(MJTI)
        .addReg(0);
    // movzx IReg64, IReg
    BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
        .addImm(0)
        .addReg(IReg)
        .addImm(X86::sub_32bit);

    switch (JTE) {
31058 case MachineJumpTableInfo::EK_BlockAddress:
31059 // jmpq *(BReg,IReg64,8)
      BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
          .addReg(BReg)
          .addImm(8)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      break;
31067 case MachineJumpTableInfo::EK_LabelDifference32: {
31068 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
31069 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
31070 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
31072 // movl (BReg,IReg64,4), OReg
      BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
          .addReg(BReg)
          .addImm(4)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
31079 // movsx OReg64, OReg
31080 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
31081 // addq BReg, OReg64, TReg
      BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
          .addReg(OReg64)
          .addReg(BReg);
      // jmpq *TReg
      BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
      break;
    }
    default:
      llvm_unreachable("Unexpected jump table encoding");
    }
  } else {
    // jmpl *.LJTI0_0(,IReg,4)
    BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
        .addReg(0)
        .addImm(4)
        .addReg(IReg)
        .addJumpTableIndex(MJTI)
        .addReg(0);
  }
31102 // Add the jump table entries as successors to the MBB.
31103 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
31104 for (auto &LP : LPadList)
31105 if (SeenMBBs.insert(LP).second)
31106 DispContBB->addSuccessor(LP);
31108 // N.B. the order the invoke BBs are processed in doesn't matter here.
31109 SmallVector<MachineBasicBlock *, 64> MBBLPads;
31110 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
31111 for (MachineBasicBlock *MBB : InvokeBBs) {
31112 // Remove the landing pad successor from the invoke block and replace it
31113 // with the new dispatch block.
31114 // Keep a copy of Successors since it's modified inside the loop.
    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
                                                   MBB->succ_rend());
31117 // FIXME: Avoid quadratic complexity.
31118 for (auto MBBS : Successors) {
31119 if (MBBS->isEHPad()) {
        MBB->removeSuccessor(MBBS);
        MBBLPads.push_back(MBBS);
      }
    }

    MBB->addSuccessor(DispatchBB);
31127 // Find the invoke call and mark all of the callee-saved registers as
31128 // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (auto &II : reverse(*MBB)) {
      if (!II.isCall())
        continue;

      DenseMap<unsigned, bool> DefRegs;
      for (auto &MOp : II.operands())
        if (MOp.isReg())
          DefRegs[MOp.getReg()] = true;
31140 MachineInstrBuilder MIB(*MF, &II);
31141 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
        unsigned Reg = SavedRegs[RegIdx];
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }
31151 // Mark all former landing pads as non-landing pads. The dispatch is the only
31152 // landing pad now.
31153 for (auto &LP : MBBLPads)
31154 LP->setIsEHPad(false);
31156 // The instruction is gone now.
  MI.eraseFromParent();
  return BB;
}
31161 MachineBasicBlock *
31162 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
31163 MachineBasicBlock *BB) const {
31164 MachineFunction *MF = BB->getParent();
31165 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31166 DebugLoc DL = MI.getDebugLoc();
31168 switch (MI.getOpcode()) {
31169 default: llvm_unreachable("Unexpected instr type to insert");
31170 case X86::TLS_addr32:
31171 case X86::TLS_addr64:
31172 case X86::TLS_base_addr32:
31173 case X86::TLS_base_addr64:
31174 return EmitLoweredTLSAddr(MI, BB);
31175 case X86::RETPOLINE_CALL32:
31176 case X86::RETPOLINE_CALL64:
31177 case X86::RETPOLINE_TCRETURN32:
31178 case X86::RETPOLINE_TCRETURN64:
31179 return EmitLoweredRetpoline(MI, BB);
31180 case X86::CATCHRET:
31181 return EmitLoweredCatchRet(MI, BB);
31182 case X86::CATCHPAD:
31183 return EmitLoweredCatchPad(MI, BB);
31184 case X86::SEG_ALLOCA_32:
31185 case X86::SEG_ALLOCA_64:
31186 return EmitLoweredSegAlloca(MI, BB);
31187 case X86::TLSCall_32:
31188 case X86::TLSCall_64:
31189 return EmitLoweredTLSCall(MI, BB);
31190 case X86::CMOV_FR32:
31191 case X86::CMOV_FR32X:
31192 case X86::CMOV_FR64:
31193 case X86::CMOV_FR64X:
31194 case X86::CMOV_GR8:
31195 case X86::CMOV_GR16:
31196 case X86::CMOV_GR32:
31197 case X86::CMOV_RFP32:
31198 case X86::CMOV_RFP64:
31199 case X86::CMOV_RFP80:
31200 case X86::CMOV_VR128:
31201 case X86::CMOV_VR128X:
31202 case X86::CMOV_VR256:
31203 case X86::CMOV_VR256X:
31204 case X86::CMOV_VR512:
31205 case X86::CMOV_VK2:
31206 case X86::CMOV_VK4:
31207 case X86::CMOV_VK8:
31208 case X86::CMOV_VK16:
31209 case X86::CMOV_VK32:
31210 case X86::CMOV_VK64:
31211 return EmitLoweredSelect(MI, BB);
31213 case X86::RDFLAGS32:
  case X86::RDFLAGS64: {
    unsigned PushF =
        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
31217 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
31218 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
31219 // Permit reads of the EFLAGS and DF registers without them being defined.
31220 // This intrinsic exists to read external processor state in flags, such as
31221 // the trap flag, interrupt flag, and direction flag, none of which are
31222 // modeled by the backend.
31223 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
31224 "Unexpected register in operand!");
31225 Push->getOperand(2).setIsUndef();
31226 assert(Push->getOperand(3).getReg() == X86::DF &&
31227 "Unexpected register in operand!");
31228 Push->getOperand(3).setIsUndef();
31229 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
31235 case X86::WRFLAGS32:
  case X86::WRFLAGS64: {
    unsigned Push =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
    unsigned PopF =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
31241 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
31242 BuildMI(*BB, MI, DL, TII->get(PopF));
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
31248 case X86::FP32_TO_INT16_IN_MEM:
31249 case X86::FP32_TO_INT32_IN_MEM:
31250 case X86::FP32_TO_INT64_IN_MEM:
31251 case X86::FP64_TO_INT16_IN_MEM:
31252 case X86::FP64_TO_INT32_IN_MEM:
31253 case X86::FP64_TO_INT64_IN_MEM:
31254 case X86::FP80_TO_INT16_IN_MEM:
31255 case X86::FP80_TO_INT32_IN_MEM:
31256 case X86::FP80_TO_INT64_IN_MEM: {
31257 // Change the floating point control register to use "round towards zero"
31258 // mode when truncating to an integer value.
31259 int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
31260 addFrameReference(BuildMI(*BB, MI, DL,
31261 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
31263 // Load the old value of the control word...
31264 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
31265 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
31268 // OR 0b11 into bit 10 and 11. 0b11 is the encoding for round toward zero.
31269 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
31270 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
31271 .addReg(OldCW, RegState::Kill).addImm(0xC00);
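    // E.g. the common default control word 0x027F (round to nearest) becomes
    // 0x0E7F (round toward zero) after OR'ing in 0xC00.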
31273 // Extract to 16 bits.
    Register NewCW16 =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
31276 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
31277 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
31279 // Prepare memory for FLDCW.
31280 int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
                      NewCWFrameIdx)
        .addReg(NewCW16, RegState::Kill);
31285 // Reload the modified control word now...
31286 addFrameReference(BuildMI(*BB, MI, DL,
31287 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
31289 // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI.getOpcode()) {
31292 default: llvm_unreachable("illegal opcode!");
31293 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
31294 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
31295 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
31296 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
31297 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
31298 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
31299 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
31300 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM = getAddressFromInstr(&MI, 0);
31305 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
31306 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
31308 // Reload the original control word now.
31309 addFrameReference(BuildMI(*BB, MI, DL,
31310 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case X86::XBEGIN:
    return emitXBegin(MI, BB, Subtarget.getInstrInfo());
31320 case X86::VASTART_SAVE_XMM_REGS:
31321 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
31323 case X86::VAARG_64:
31324 return EmitVAARG64WithCustomInserter(MI, BB);
31326 case X86::EH_SjLj_SetJmp32:
31327 case X86::EH_SjLj_SetJmp64:
31328 return emitEHSjLjSetJmp(MI, BB);
31330 case X86::EH_SjLj_LongJmp32:
31331 case X86::EH_SjLj_LongJmp64:
31332 return emitEHSjLjLongJmp(MI, BB);
31334 case X86::Int_eh_sjlj_setup_dispatch:
31335 return EmitSjLjDispatchBlock(MI, BB);
31337 case TargetOpcode::STATEPOINT:
31338 // As an implementation detail, STATEPOINT shares the STACKMAP format at
31339 // this point in the process. We diverge later.
31340 return emitPatchPoint(MI, BB);
31342 case TargetOpcode::STACKMAP:
31343 case TargetOpcode::PATCHPOINT:
31344 return emitPatchPoint(MI, BB);
31346 case TargetOpcode::PATCHABLE_EVENT_CALL:
31347 return emitXRayCustomEvent(MI, BB);
31349 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
31350 return emitXRayTypedEvent(MI, BB);
31352 case X86::LCMPXCHG8B: {
31353 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    // In addition to the four E[ABCD] registers implied by its encoding,
    // CMPXCHG8B requires a memory operand. If it happens that the current
    // architecture is i686 and the current function needs a base pointer
    // - which is ESI for i686 - the register allocator would not be able to
    // allocate registers for an address in the form of X(%reg, %reg, Y):
    // there would never be enough unreserved registers during regalloc
    // (without the need for a base pointer, the only option would be
    // X(%edi, %esi, Y)).
    // We are giving a hand to the register allocator by precomputing the
    // address in a new vreg using LEA.
    // If it is not i686 or there is no base pointer, nothing to do here.
    if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
      return BB;
    // Even though this code does not necessarily need the base pointer to
    // be ESI, we check for that. The reason: if this assert fires, something
    // has changed in the compiler's base pointer handling, and that change
    // most probably has to be addressed here as well.
31372 assert(TRI->getBaseRegister() == X86::ESI &&
31373 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
31374 "base pointer in mind");
31376 MachineRegisterInfo &MRI = MF->getRegInfo();
31377 MVT SPTy = getPointerTy(MF->getDataLayout());
31378 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
31379 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
31381 X86AddressMode AM = getAddressFromInstr(&MI, 0);
31382 // Regalloc does not need any help when the memory operand of CMPXCHG8B
31383 // does not use index register.
    if (AM.IndexReg == X86::NoRegister)
      return BB;
31387 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
31388 // four operand definitions that are E[ABCD] registers. We skip them and
31389 // then insert the LEA.
31390 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
31391 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
31392 RMBBI->definesRegister(X86::EBX) ||
31393 RMBBI->definesRegister(X86::ECX) ||
                                   RMBBI->definesRegister(X86::EDX))) {
      ++RMBBI;
    }
    MachineBasicBlock::iterator MBBI(RMBBI);
    addFullAddress(
        BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);

    setDirectAddressInInstr(&MI, 0, computedAddrVReg);

    return BB;
  }
  case X86::LCMPXCHG16B:
    return BB;
31407 case X86::LCMPXCHG8B_SAVE_EBX:
  case X86::LCMPXCHG16B_SAVE_RBX: {
    unsigned BasePtr =
        MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
    if (!BB->isLiveIn(BasePtr))
      BB->addLiveIn(BasePtr);
    return BB;
  }
  }
}
31418 //===----------------------------------------------------------------------===//
31419 // X86 Optimization Hooks
31420 //===----------------------------------------------------------------------===//
bool
X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
31424 const APInt &Demanded,
31425 TargetLoweringOpt &TLO) const {
31426 // Only optimize Ands to prevent shrinking a constant that could be
31427 // matched by movzx.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  unsigned Size = VT.getSizeInBits();
31439 // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;
31444 const APInt &Mask = C->getAPIntValue();
31446 // Clear all non-demanded bits initially.
31447 APInt ShrunkMask = Mask & Demanded;
31449 // Find the width of the shrunk mask.
31450 unsigned Width = ShrunkMask.getActiveBits();
  // If the mask is all 0s there's nothing to do here.
  if (ShrunkMask.isNullValue())
    return false;
31456 // Find the next power of 2 width, rounding up to a byte.
31457 Width = PowerOf2Ceil(std::max(Width, 8U));
31458 // Truncate the width to size to handle illegal types.
31459 Width = std::min(Width, Size);
31461 // Calculate a possible zero extend mask for this constant.
31462 APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
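  // Worked example: for 'and X, 0xF0' where only bits 4-7 are demanded,
  // ShrunkMask is 0xF0, Width rounds up to 8, and ZeroExtendMask becomes
  // 0xFF; rewriting the constant to 0xFF lets the AND be matched as a movzx.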
31464 // If we aren't changing the mask, just return true to keep it and prevent
31465 // the caller from optimizing.
  if (ZeroExtendMask == Mask)
    return true;
31469 // Make sure the new mask can be represented by a combination of mask bits
31470 // and non-demanded bits.
  if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
    return false;
  // Replace the constant with the zero extend mask.
  SDLoc DL(Op);
  SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
31477 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
  return TLO.CombineTo(Op, NewOp);
}

void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
31483 const APInt &DemandedElts,
31484 const SelectionDAG &DAG,
31485 unsigned Depth) const {
31486 unsigned BitWidth = Known.getBitWidth();
31487 unsigned Opc = Op.getOpcode();
31488 EVT VT = Op.getValueType();
31489 assert((Opc >= ISD::BUILTIN_OP_END ||
31490 Opc == ISD::INTRINSIC_WO_CHAIN ||
31491 Opc == ISD::INTRINSIC_W_CHAIN ||
31492 Opc == ISD::INTRINSIC_VOID) &&
31493 "Should use MaskedValueIsZero if you don't know whether Op"
31494 " is a target node!");
31499 case X86ISD::SETCC:
    Known.Zero.setBitsFrom(1);
    break;
31502 case X86ISD::MOVMSK: {
31503 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
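    // E.g. MOVMSK of a v4f32 sets only the low 4 bits of its i32 result, so
    // all higher bits are known zero.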
    Known.Zero.setBitsFrom(NumLoBits);
    break;
  }
31507 case X86ISD::PEXTRB:
31508 case X86ISD::PEXTRW: {
31509 SDValue Src = Op.getOperand(0);
31510 EVT SrcVT = Src.getValueType();
31511 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
31512 Op.getConstantOperandVal(1));
31513 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
31514 Known = Known.zextOrTrunc(BitWidth, false);
    Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
    break;
  }
31518 case X86ISD::VSRAI:
31519 case X86ISD::VSHLI:
31520 case X86ISD::VSRLI: {
31521 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
31522 if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
        Known.setAllZero();
        break;
      }
31527 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31528 unsigned ShAmt = ShiftImm->getZExtValue();
31529 if (Opc == X86ISD::VSHLI) {
31530 Known.Zero <<= ShAmt;
31531 Known.One <<= ShAmt;
31532 // Low bits are known zero.
31533 Known.Zero.setLowBits(ShAmt);
31534 } else if (Opc == X86ISD::VSRLI) {
31535 Known.Zero.lshrInPlace(ShAmt);
31536 Known.One.lshrInPlace(ShAmt);
31537 // High bits are known zero.
        Known.Zero.setHighBits(ShAmt);
      } else {
        Known.Zero.ashrInPlace(ShAmt);
        Known.One.ashrInPlace(ShAmt);
      }
    }
    break;
  }
31546 case X86ISD::PACKUS: {
31547 // PACKUS is just a truncation if the upper half is zero.
31548 APInt DemandedLHS, DemandedRHS;
31549 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
31551 Known.One = APInt::getAllOnesValue(BitWidth * 2);
    Known.Zero = APInt::getAllOnesValue(BitWidth * 2);

    KnownBits Known2;
    if (!!DemandedLHS) {
31556 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31557 Known.One &= Known2.One;
31558 Known.Zero &= Known2.Zero;
31560 if (!!DemandedRHS) {
31561 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31562 Known.One &= Known2.One;
31563 Known.Zero &= Known2.Zero;
    if (Known.countMinLeadingZeros() < BitWidth)
      Known.resetAll();
    Known = Known.trunc(BitWidth);
    break;
  }
  case X86ISD::ANDNP: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31574 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31576 // ANDNP = (~X & Y);
31577 Known.One &= Known2.Zero;
    Known.Zero |= Known2.One;
    break;
  }
  case X86ISD::FOR: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31584 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31586 // Output known-0 bits are only known if clear in both the LHS & RHS.
31587 Known.Zero &= Known2.Zero;
31588 // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  }
31592 case X86ISD::PSADBW: {
31593 assert(VT.getScalarType() == MVT::i64 &&
31594 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
31595 "Unexpected PSADBW types");
31597 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
    Known.Zero.setBitsFrom(16);
    break;
  }
31601 case X86ISD::CMOV: {
31602 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
31603 // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
31606 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
31608 // Only known if known in both the LHS and RHS.
31609 Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  }
  }
31615 // Handle target shuffles.
31616 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opc)) {
    bool IsUnary;
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
                             IsUnary)) {
31623 unsigned NumOps = Ops.size();
31624 unsigned NumElts = VT.getVectorNumElements();
31625 if (Mask.size() == NumElts) {
31626 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31627 Known.Zero.setAllBits(); Known.One.setAllBits();
31628 for (unsigned i = 0; i != NumElts; ++i) {
        if (!DemandedElts[i])
          continue;
        int M = Mask[i];
        if (M == SM_SentinelUndef) {
31633 // For UNDEF elements, we don't know anything about the common state
          // of the shuffle result.
          Known.resetAll();
          break;
        } else if (M == SM_SentinelZero) {
          Known.One.clearAllBits();
          continue;
        }
31641 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31642 "Shuffle index out of range");
31644 unsigned OpIdx = (unsigned)M / NumElts;
31645 unsigned EltIdx = (unsigned)M % NumElts;
31646 if (Ops[OpIdx].getValueType() != VT) {
          // TODO - handle target shuffle ops with different value types.
          Known.resetAll();
          break;
        }
        DemandedOps[OpIdx].setBit(EltIdx);
      }
31653 // Known bits are the values that are shared by every demanded element.
31654 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
        if (!DemandedOps[i])
          continue;
        KnownBits Known2 =
            DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
31659 Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    }
  }
}
31667 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
31668 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
31669 unsigned Depth) const {
31670 EVT VT = Op.getValueType();
31671 unsigned VTBits = VT.getScalarSizeInBits();
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
31674 case X86ISD::SETCC_CARRY:
    // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
    return VTBits;
31678 case X86ISD::VTRUNC: {
31679 // TODO: Add DemandedElts support.
31680 SDValue Src = Op.getOperand(0);
31681 unsigned NumSrcBits = Src.getScalarValueSizeInBits();
31682 assert(VTBits < NumSrcBits && "Illegal truncation input type");
31683 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
    if (Tmp > (NumSrcBits - VTBits))
      return Tmp - (NumSrcBits - VTBits);
    return 1;
  }
31689 case X86ISD::PACKSS: {
31690 // PACKSS is just a truncation if the sign bits extend to the packed size.
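    // For example, packing v8i16 sources that each carry at least 12 sign
    // bits into v16i8 leaves 12 - (16 - 8) = 4 sign bits in every byte.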
31691 APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
                        DemandedRHS);
31695 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
31696 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
    if (!!DemandedLHS)
      Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
    if (!!DemandedRHS)
      Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31701 unsigned Tmp = std::min(Tmp0, Tmp1);
31702 if (Tmp > (SrcBits - VTBits))
      return Tmp - (SrcBits - VTBits);
    return 1;
  }
31707 case X86ISD::VSHLI: {
31708 SDValue Src = Op.getOperand(0);
31709 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
31710 if (ShiftVal.uge(VTBits))
31711 return VTBits; // Shifted all bits out --> zero.
31712 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
31713 if (ShiftVal.uge(Tmp))
31714 return 1; // Shifted all sign bits out --> unknown.
    return Tmp - ShiftVal.getZExtValue();
  }
31718 case X86ISD::VSRAI: {
31719 SDValue Src = Op.getOperand(0);
31720 APInt ShiftVal = Op.getConstantOperandAPInt(1);
31721 if (ShiftVal.uge(VTBits - 1))
31722 return VTBits; // Sign splat.
    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
    ShiftVal += Tmp;
    return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
  }
31728 case X86ISD::PCMPGT:
  case X86ISD::PCMPEQ:
  case X86ISD::CMPP:
  case X86ISD::VPCOM:
  case X86ISD::VPCOMU:
    // Vector compares return zero/all-bits result values.
    return VTBits;
  case X86ISD::ANDNP: {
    unsigned Tmp0 =
        DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp0 == 1) return 1; // Early out.
    unsigned Tmp1 =
        DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp0, Tmp1);
  }
31745 case X86ISD::CMOV: {
31746 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
31747 if (Tmp0 == 1) return 1; // Early out.
31748 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
    return std::min(Tmp0, Tmp1);
  }
  }
31753 // Handle target shuffles.
31754 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opcode)) {
    bool IsUnary;
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
                             IsUnary)) {
31761 unsigned NumOps = Ops.size();
31762 unsigned NumElts = VT.getVectorNumElements();
31763 if (Mask.size() == NumElts) {
31764 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31765 for (unsigned i = 0; i != NumElts; ++i) {
        if (!DemandedElts[i])
          continue;
        int M = Mask[i];
        if (M == SM_SentinelUndef) {
31770 // For UNDEF elements, we don't know anything about the common state
          // of the shuffle result.
          return 1;
        } else if (M == SM_SentinelZero) {
          // Zero = all sign bits.
          continue;
        }
31777 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31778 "Shuffle index out of range");
31780 unsigned OpIdx = (unsigned)M / NumElts;
31781 unsigned EltIdx = (unsigned)M % NumElts;
31782 if (Ops[OpIdx].getValueType() != VT) {
          // TODO - handle target shuffle ops with different value types.
          return 1;
        }
        DemandedOps[OpIdx].setBit(EltIdx);
      }
31788 unsigned Tmp0 = VTBits;
31789 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
        if (!DemandedOps[i])
          continue;
        unsigned Tmp1 =
            DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
        Tmp0 = std::min(Tmp0, Tmp1);
      }
      return Tmp0;
    }
  }

  // Fallback case.
  return 1;
}
31805 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
31806 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
    return N->getOperand(0);
  return N;
}
// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
31813 // TODO: Investigate sharing more of this with shuffle lowering.
31814 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
31815 bool AllowFloatDomain, bool AllowIntDomain,
31816 SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
31817 const X86Subtarget &Subtarget, unsigned &Shuffle,
31818 MVT &SrcVT, MVT &DstVT) {
31819 unsigned NumMaskElts = Mask.size();
31820 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
31822 // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
31823 if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
31824 isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
31825 Shuffle = X86ISD::VZEXT_MOVL;
    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }
  // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
31831 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
31832 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
31833 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
31834 unsigned MaxScale = 64 / MaskEltSize;
31835 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
31836 bool MatchAny = true;
31837 bool MatchZero = true;
31838 unsigned NumDstElts = NumMaskElts / Scale;
31839 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
31840 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
          MatchAny = MatchZero = false;
          break;
        }
31844 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
        MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
      }
31847 if (MatchAny || MatchZero) {
31848 assert(MatchZero && "Failed to match zext but matched aext?");
31849 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
31850 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
31851 MVT::getIntegerVT(MaskEltSize);
31852 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
31854 if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
31855 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
31857 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
31858 if (SrcVT.getVectorNumElements() != NumDstElts)
31859 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
31861 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
        return true;
      }
    }
  }
31868 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
31869 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
31870 isUndefOrEqual(Mask[0], 0) &&
31871 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
31872 Shuffle = X86ISD::VZEXT_MOVL;
    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }
  // Check if we have SSE3, which will let us use MOVDDUP etc. These
  // instructions are no slower than UNPCKLPD but have the option to
  // fold the input operand into even an unaligned memory load.
31880 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
31881 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
31882 Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
31886 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31887 Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
31891 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
31892 Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
  }
31898 if (MaskVT.is256BitVector() && AllowFloatDomain) {
31899 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
31900 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31901 Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v4f64;
      return true;
    }
31905 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31906 Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
31910 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
31911 Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
  }
31917 if (MaskVT.is512BitVector() && AllowFloatDomain) {
31918 assert(Subtarget.hasAVX512() &&
31919 "AVX512 required for 512-bit vector shuffles");
31920 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31921 Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v8f64;
      return true;
    }
31925 if (isTargetShuffleEquivalent(
31926 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
31927 Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
31931 if (isTargetShuffleEquivalent(
31932 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
31933 Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
  }

  return false;
}
31942 // Attempt to match a combined shuffle mask against supported unary immediate
31943 // permute instructions.
31944 // TODO: Investigate sharing more of this with shuffle lowering.
31945 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
31946 const APInt &Zeroable,
31947 bool AllowFloatDomain, bool AllowIntDomain,
31948 const X86Subtarget &Subtarget,
31949 unsigned &Shuffle, MVT &ShuffleVT,
31950 unsigned &PermuteImm) {
31951 unsigned NumMaskElts = Mask.size();
31952 unsigned InputSizeInBits = MaskVT.getSizeInBits();
31953 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
31954 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
31956 bool ContainsZeros =
31957 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
31960 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
31961 // Check for lane crossing permutes.
31962 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
31963 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
31964 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
31965 Shuffle = X86ISD::VPERMI;
31966 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
        PermuteImm = getV4X86ShuffleImm(Mask);
        return true;
      }
31970 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
31971 SmallVector<int, 4> RepeatedMask;
31972 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
31973 Shuffle = X86ISD::VPERMI;
31974 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
          PermuteImm = getV4X86ShuffleImm(RepeatedMask);
          return true;
        }
      }
31979 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
31980 // VPERMILPD can permute with a non-repeating shuffle.
31981 Shuffle = X86ISD::VPERMILPI;
      ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
      PermuteImm = 0;
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        int M = Mask[i];
        if (M == SM_SentinelUndef)
          continue;
        assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
        PermuteImm |= (M & 1) << i;
      }
      return true;
    }
  }
31995 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
31996 // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
31997 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
31998 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
31999 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
32000 SmallVector<int, 4> RepeatedMask;
32001 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
32002 // Narrow the repeated mask to create 32-bit element permutes.
32003 SmallVector<int, 4> WordMask = RepeatedMask;
32004 if (MaskScalarSizeInBits == 64)
32005 scaleShuffleMask<int>(2, RepeatedMask, WordMask);
32007 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
32008 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
32009 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
      PermuteImm = getV4X86ShuffleImm(WordMask);
      return true;
    }
  }
32015 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
32016 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
32017 SmallVector<int, 4> RepeatedMask;
32018 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
32019 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
32020 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
32022 // PSHUFLW: permute lower 4 elements only.
32023 if (isUndefOrInRange(LoMask, 0, 4) &&
32024 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
32025 Shuffle = X86ISD::PSHUFLW;
32026 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
32027 PermuteImm = getV4X86ShuffleImm(LoMask);
32028 return true;
32029 }
32031 // PSHUFHW: permute upper 4 elements only.
32032 if (isUndefOrInRange(HiMask, 4, 8) &&
32033 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
32034 // Offset the HiMask so that we can create the shuffle immediate.
32035 int OffsetHiMask[4];
32036 for (int i = 0; i != 4; ++i)
32037 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
32039 Shuffle = X86ISD::PSHUFHW;
32040 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
32041 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
32042 return true;
32043 }
32044 }
32045 }
32047 // Attempt to match against byte/bit shifts.
32048 // FIXME: Add 512-bit support.
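// Worked example (illustrative): the v16i8 mask {Z, 0, 1, ..., 14}, where Z
// is SM_SentinelZero, is a byte shift left by one, so matchShuffleAsShift
// returns ShiftAmt = 1 and that value becomes the shift immediate.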
32049 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
32050 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
32051 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
32052 Mask, 0, Zeroable, Subtarget);
32053 if (0 < ShiftAmt) {
32054 PermuteImm = (unsigned)ShiftAmt;
32055 return true;
32056 }
32057 }
32059 return false;
32060 }
32062 // Attempt to match a combined unary shuffle mask against supported binary
32063 // shuffle instructions.
32064 // TODO: Investigate sharing more of this with shuffle lowering.
32065 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
32066 bool AllowFloatDomain, bool AllowIntDomain,
32067 SDValue &V1, SDValue &V2, const SDLoc &DL,
32068 SelectionDAG &DAG, const X86Subtarget &Subtarget,
32069 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
32070 bool IsUnary) {
32071 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
32073 if (MaskVT.is128BitVector()) {
32074 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
32076 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
32077 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
32078 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
32079 return true;
32080 }
32081 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
32083 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
32084 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
32085 return true;
32086 }
32087 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
32088 (AllowFloatDomain || !Subtarget.hasSSE41())) {
32089 std::swap(V1, V2);
32090 Shuffle = X86ISD::MOVSD;
32091 SrcVT = DstVT = MVT::v2f64;
32092 return true;
32093 }
32094 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
32095 (AllowFloatDomain || !Subtarget.hasSSE41())) {
32096 Shuffle = X86ISD::MOVSS;
32097 SrcVT = DstVT = MVT::v4f32;
32098 return true;
32099 }
32100 }
32102 // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
32103 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
32104 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
32105 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
32106 if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
32107 Subtarget)) {
32108 DstVT = MaskVT;
32109 return true;
32110 }
32111 }
32113 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
32114 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
32115 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
32116 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
32117 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
32118 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
32119 if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
32120 DAG, Subtarget)) {
32121 SrcVT = DstVT = MaskVT;
32122 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
32123 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
32124 return true;
32125 }
32126 }
32128 return false;
32129 }
32131 static bool matchBinaryPermuteShuffle(
32132 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
32133 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
32134 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
32135 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
32136 unsigned NumMaskElts = Mask.size();
32137 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
32139 // Attempt to match against PALIGNR byte rotate.
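// Worked example (illustrative): for v16i8 operands, the mask
// {1, 2, ..., 15, 16} (the upper 15 bytes of V1 followed by the first byte
// of V2) is a rotate of the concatenated inputs, matching PALIGNR with
// PermuteImm = 1.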
32140 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
32141 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
32142 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
32143 if (0 < ByteRotation) {
32144 Shuffle = X86ISD::PALIGNR;
32145 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
32146 PermuteImm = ByteRotation;
32147 return true;
32148 }
32149 }
32151 // Attempt to combine to X86ISD::BLENDI.
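// Worked example (illustrative): the v4i32 mask {0, 5, 2, 7} takes elements
// 1 and 3 from V2, so the immediate built below becomes
// BlendMask = (1 << 1) | (1 << 3) = 0b1010.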
32152 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
32153 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
32154 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
32155 uint64_t BlendMask = 0;
32156 bool ForceV1Zero = false, ForceV2Zero = false;
32157 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
32158 if (matchVectorShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
32159 ForceV2Zero, BlendMask)) {
32160 if (MaskVT == MVT::v16i16) {
32161 // We can only use v16i16 PBLENDW if the lanes are repeated.
32162 SmallVector<int, 8> RepeatedMask;
32163 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
32164 RepeatedMask)) {
32165 assert(RepeatedMask.size() == 8 &&
32166 "Repeated mask size doesn't match!");
32168 for (int i = 0; i < 8; ++i)
32169 if (RepeatedMask[i] >= 8)
32170 PermuteImm |= 1 << i;
32171 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
32172 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
32173 Shuffle = X86ISD::BLENDI;
32174 ShuffleVT = MaskVT;
32175 return true;
32176 }
32177 } else {
32178 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
32179 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
32180 PermuteImm = (unsigned)BlendMask;
32181 Shuffle = X86ISD::BLENDI;
32182 ShuffleVT = MaskVT;
32183 return true;
32184 }
32185 }
32186 }
32188 // Attempt to combine to INSERTPS, but only if it has elements that need to
32189 // be set to zero.
32190 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
32191 MaskVT.is128BitVector() &&
32192 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }) &&
32193 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
32194 Shuffle = X86ISD::INSERTPS;
32195 ShuffleVT = MVT::v4f32;
32196 return true;
32197 }
32199 // Attempt to combine to SHUFPD.
32200 if (AllowFloatDomain && EltSizeInBits == 64 &&
32201 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
32202 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
32203 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
32204 bool ForceV1Zero = false, ForceV2Zero = false;
32205 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
32206 PermuteImm, Mask, Zeroable)) {
32207 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
32208 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
32209 Shuffle = X86ISD::SHUFP;
32210 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
32211 return true;
32212 }
32213 }
32215 // Attempt to combine to SHUFPS.
32216 if (AllowFloatDomain && EltSizeInBits == 32 &&
32217 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
32218 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
32219 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
32220 SmallVector<int, 4> RepeatedMask;
32221 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
32222 // Match each half of the repeated mask, to determine if it just
32223 // references one of the vectors, is zeroable, or is entirely undef.
32224 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
32225 int M0 = RepeatedMask[Offset];
32226 int M1 = RepeatedMask[Offset + 1];
32228 if (isUndefInRange(RepeatedMask, Offset, 2)) {
32229 return DAG.getUNDEF(MaskVT);
32230 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
32231 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
32232 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
32233 return getZeroVector(MaskVT, Subtarget, DAG, DL);
32234 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
32235 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
32236 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
32237 return V1;
32238 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
32239 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
32240 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
32241 return V2;
32242 }
32243 return SDValue();
32244 };
32247 int ShufMask[4] = {-1, -1, -1, -1};
32248 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
32249 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
32251 if (Lo && Hi) {
32252 V1 = Lo;
32253 V2 = Hi;
32254 Shuffle = X86ISD::SHUFP;
32255 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
32256 PermuteImm = getV4X86ShuffleImm(ShufMask);
32257 return true;
32258 }
32259 }
32260 }
32262 // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
32263 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
32264 MaskVT.is128BitVector() &&
32265 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
32266 Shuffle = X86ISD::INSERTPS;
32267 ShuffleVT = MVT::v4f32;
32268 return true;
32269 }
32271 return false;
32272 }
32274 static SDValue combineX86ShuffleChainWithExtract(
32275 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
32276 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32277 const X86Subtarget &Subtarget);
32279 /// Combine an arbitrary chain of shuffles into a single instruction if
32280 /// possible.
32281 ///
32282 /// This is the leaf of the recursive combine below. When we have found some
32283 /// chain of single-use x86 shuffle instructions and accumulated the combined
32284 /// shuffle mask represented by them, this will try to pattern match that mask
32285 /// into either a single instruction if there is a special purpose instruction
32286 /// for this operation, or into a PSHUFB instruction which is a fully general
32287 /// instruction but should only be used to replace chains over a certain depth.
32288 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
32289 ArrayRef<int> BaseMask, int Depth,
32290 bool HasVariableMask,
32291 bool AllowVariableMask, SelectionDAG &DAG,
32292 const X86Subtarget &Subtarget) {
32293 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
32294 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
32295 "Unexpected number of shuffle inputs!");
32297 // Find the inputs that enter the chain. Note that multiple uses are OK
32298 // here, we're not going to remove the operands we find.
32299 bool UnaryShuffle = (Inputs.size() == 1);
32300 SDValue V1 = peekThroughBitcasts(Inputs[0]);
32301 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
32302 : peekThroughBitcasts(Inputs[1]));
32304 MVT VT1 = V1.getSimpleValueType();
32305 MVT VT2 = V2.getSimpleValueType();
32306 MVT RootVT = Root.getSimpleValueType();
32307 assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
32308 VT2.getSizeInBits() == RootVT.getSizeInBits() &&
32309 "Vector size mismatch");
32314 unsigned NumBaseMaskElts = BaseMask.size();
32315 if (NumBaseMaskElts == 1) {
32316 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
32317 return DAG.getBitcast(RootVT, V1);
32318 }
32320 unsigned RootSizeInBits = RootVT.getSizeInBits();
32321 unsigned NumRootElts = RootVT.getVectorNumElements();
32322 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
32323 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
32324 (RootVT.isFloatingPoint() && Depth >= 1) ||
32325 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
32327 // Don't combine if we are an AVX512/EVEX target and the mask element size
32328 // is different from the root element size - this would prevent writemasks
32329 // from being reused.
32330 // TODO - this currently prevents all lane shuffles from occurring.
32331 // TODO - check for writemasks usage instead of always preventing combining.
32332 // TODO - attempt to narrow Mask back to writemask size.
32333 bool IsEVEXShuffle =
32334 RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
32336 // Attempt to match a subvector broadcast.
32337 // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
32338 if (UnaryShuffle &&
32339 (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
32340 SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
32341 if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
32342 SDValue Src = Inputs[0];
32343 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
32344 Src.getOperand(0).isUndef() &&
32345 Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
32346 MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
32347 return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
32348 Src.getValueType(),
32349 Src.getOperand(1)));
32350 }
32351 }
32352 }
32354 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
32356 // Handle 128-bit lane shuffles of 256-bit vectors.
32357 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
32358 // we need to use the zeroing feature.
32359 // TODO - this should support binary shuffles.
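// Worked example (illustrative): BaseMask {1, 0} (swap the two 128-bit
// halves of V1) yields PermMask = 0x01 below: bits [1:0] select the source
// lane for the low half, bits [5:4] for the high half, and 0x8 in either
// nibble zeroes that half.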
32360 if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
32361 !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
32362 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
32363 if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
32364 return SDValue(); // Nothing to do!
32365 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
32366 unsigned PermMask = 0;
32367 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
32368 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
32370 Res = DAG.getBitcast(ShuffleVT, V1);
32371 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
32372 DAG.getUNDEF(ShuffleVT),
32373 DAG.getTargetConstant(PermMask, DL, MVT::i8));
32374 return DAG.getBitcast(RootVT, Res);
32375 }
32377 // For masks that have been widened to 128-bit elements or more,
32378 // narrow back down to 64-bit elements.
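// Worked example (illustrative): a 2-element mask {1, 0} over 128-bit
// elements scales to the 64-bit-element mask {2, 3, 0, 1}, which the
// matchers below can consume.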
32379 SmallVector<int, 64> Mask;
32380 if (BaseMaskEltSizeInBits > 64) {
32381 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
32382 int MaskScale = BaseMaskEltSizeInBits / 64;
32383 scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
32384 } else {
32385 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
32386 }
32388 unsigned NumMaskElts = Mask.size();
32389 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
32391 // Determine the effective mask value type.
32392 FloatDomain &= (32 <= MaskEltSizeInBits);
32393 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
32394 : MVT::getIntegerVT(MaskEltSizeInBits);
32395 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
32397 // Only allow legal mask types.
32398 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
32399 return SDValue();
32401 // Attempt to match the mask against known shuffle patterns.
32402 MVT ShuffleSrcVT, ShuffleVT;
32403 unsigned Shuffle, PermuteImm;
32405 // Which shuffle domains are permitted?
32406 // Permit domain crossing at higher combine depths.
32407 // TODO: Should we indicate which domain is preferred if both are allowed?
32408 bool AllowFloatDomain = FloatDomain || (Depth >= 3);
32409 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
32410 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
32412 // Determine zeroable mask elements.
32413 APInt Zeroable(NumMaskElts, 0);
32414 for (unsigned i = 0; i != NumMaskElts; ++i)
32415 if (isUndefOrZero(Mask[i]))
32416 Zeroable.setBit(i);
32418 if (UnaryShuffle) {
32419 // If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
32420 // directly if we don't shuffle the lower element and we shuffle the upper
32421 // (zero) elements within themselves.
32422 if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
32423 (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
32424 MaskEltSizeInBits) == 0) {
32425 unsigned Scale =
32426 cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
32427 MaskEltSizeInBits;
32428 ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
32429 if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
32430 isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
32431 return DAG.getBitcast(RootVT, V1);
32432 }
32433 }
32435 // Attempt to match against broadcast-from-vector.
32436 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
32437 if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
32438 && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
32439 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
32440 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
32441 if (V1.getValueType() == MaskVT &&
32442 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
32443 MayFoldLoad(V1.getOperand(0))) {
32444 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
32445 return SDValue(); // Nothing to do!
32446 Res = V1.getOperand(0);
32447 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
32448 return DAG.getBitcast(RootVT, Res);
32449 }
32450 if (Subtarget.hasAVX2()) {
32451 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
32452 return SDValue(); // Nothing to do!
32453 Res = DAG.getBitcast(MaskVT, V1);
32454 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
32455 return DAG.getBitcast(RootVT, Res);
32456 }
32457 }
32458 }
32460 SDValue NewV1 = V1; // Save operand in case early exit happens.
32461 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32462 DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32463 ShuffleVT) &&
32464 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32465 if (Depth == 0 && Root.getOpcode() == Shuffle)
32466 return SDValue(); // Nothing to do!
32467 Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
32468 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
32469 return DAG.getBitcast(RootVT, Res);
32470 }
32472 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
32473 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
32474 PermuteImm) &&
32475 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32476 if (Depth == 0 && Root.getOpcode() == Shuffle)
32477 return SDValue(); // Nothing to do!
32478 Res = DAG.getBitcast(ShuffleVT, V1);
32479 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
32480 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
32481 return DAG.getBitcast(RootVT, Res);
32482 }
32483 }
32485 SDValue NewV1 = V1; // Save operands in case early exit happens.
32486 SDValue NewV2 = V2;
32487 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32488 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32489 ShuffleVT, UnaryShuffle) &&
32490 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32491 if (Depth == 0 && Root.getOpcode() == Shuffle)
32492 return SDValue(); // Nothing to do!
32493 NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
32494 NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
32495 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
32496 return DAG.getBitcast(RootVT, Res);
32497 }
32499 NewV1 = V1; // Save operands in case early exit happens.
32500 NewV2 = V2;
32501 if (matchBinaryPermuteShuffle(
32502 MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
32503 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
32504 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32505 if (Depth == 0 && Root.getOpcode() == Shuffle)
32506 return SDValue(); // Nothing to do!
32507 NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
32508 NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
32509 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
32510 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
32511 return DAG.getBitcast(RootVT, Res);
32512 }
32514 // Typically from here on, we need an integer version of MaskVT.
32515 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
32516 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
32518 // Annoyingly, SSE4A instructions don't map into the above match helpers.
32519 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
32520 uint64_t BitLen, BitIdx;
32521 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
32522 Zeroable)) {
32523 if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
32524 return SDValue(); // Nothing to do!
32525 V1 = DAG.getBitcast(IntMaskVT, V1);
32526 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
32527 DAG.getTargetConstant(BitLen, DL, MVT::i8),
32528 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
32529 return DAG.getBitcast(RootVT, Res);
32530 }
32532 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
32533 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
32534 return SDValue(); // Nothing to do!
32535 V1 = DAG.getBitcast(IntMaskVT, V1);
32536 V2 = DAG.getBitcast(IntMaskVT, V2);
32537 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
32538 DAG.getTargetConstant(BitLen, DL, MVT::i8),
32539 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
32540 return DAG.getBitcast(RootVT, Res);
32541 }
32542 }
32544 // Don't try to re-form single instruction chains under any circumstances now
32545 // that we've done encoding canonicalization for them.
32546 if (Depth < 1)
32547 return SDValue();
32549 // Depth threshold above which we can efficiently use variable mask shuffles.
32550 int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
32551 AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
32553 bool MaskContainsZeros =
32554 any_of(Mask, [](int M) { return M == SM_SentinelZero; });
32556 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
32557 // If we have a single input lane-crossing shuffle then lower to VPERMV.
32558 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32559 ((Subtarget.hasAVX2() &&
32560 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32561 (Subtarget.hasAVX512() &&
32562 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32563 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32564 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32565 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32566 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32567 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32568 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32569 Res = DAG.getBitcast(MaskVT, V1);
32570 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
32571 return DAG.getBitcast(RootVT, Res);
32572 }
32574 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
32575 // vector as the second source.
32576 if (UnaryShuffle && AllowVariableMask &&
32577 ((Subtarget.hasAVX512() &&
32578 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32579 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32580 (Subtarget.hasVLX() &&
32581 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32582 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32583 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32584 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32585 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32586 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32587 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
32588 for (unsigned i = 0; i != NumMaskElts; ++i)
32589 if (Mask[i] == SM_SentinelZero)
32590 Mask[i] = NumMaskElts + i;
32592 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32593 Res = DAG.getBitcast(MaskVT, V1);
32594 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
32595 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
32596 return DAG.getBitcast(RootVT, Res);
32597 }
32599 // If that failed and either input is extracted then try to combine as a
32600 // shuffle with the larger type.
32601 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32602 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32603 DAG, Subtarget))
32604 return WideShuffle;
32606 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
32607 if (AllowVariableMask && !MaskContainsZeros &&
32608 ((Subtarget.hasAVX512() &&
32609 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32610 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32611 (Subtarget.hasVLX() &&
32612 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32613 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32614 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32615 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32616 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32617 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32618 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32619 V1 = DAG.getBitcast(MaskVT, V1);
32620 V2 = DAG.getBitcast(MaskVT, V2);
32621 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32622 return DAG.getBitcast(RootVT, Res);
32623 }
32624 return SDValue();
32625 }
32627 // See if we can combine a single input shuffle with zeros to a bit-mask,
32628 // which is much simpler than any shuffle.
32629 if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
32630 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
32631 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
32632 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
32633 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
32634 APInt UndefElts(NumMaskElts, 0);
32635 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
32636 for (unsigned i = 0; i != NumMaskElts; ++i) {
32637 int M = Mask[i];
32638 if (M == SM_SentinelUndef) {
32639 UndefElts.setBit(i);
32640 continue;
32641 }
32642 if (M == SM_SentinelZero)
32643 continue;
32644 EltBits[i] = AllOnes;
32645 }
32646 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
32647 Res = DAG.getBitcast(MaskVT, V1);
32648 unsigned AndOpcode =
32649 FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
32650 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
32651 return DAG.getBitcast(RootVT, Res);
32652 }
32654 // If we have a single input shuffle with different shuffle patterns in the
32655 // 128-bit lanes, use a variable mask to lower to VPERMILPS.
32656 // TODO: Combine other mask types at higher depths.
32657 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32658 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
32659 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
32660 SmallVector<SDValue, 16> VPermIdx;
32661 for (int M : Mask) {
32662 SDValue Idx =
32663 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
32664 VPermIdx.push_back(Idx);
32665 }
32666 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
32667 Res = DAG.getBitcast(MaskVT, V1);
32668 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
32669 return DAG.getBitcast(RootVT, Res);
32670 }
32672 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
32673 // to VPERMIL2PD/VPERMIL2PS.
32674 if (AllowVariableMask && Subtarget.hasXOP() &&
32675 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
32676 MaskVT == MVT::v8f32)) {
32677 // VPERMIL2 Operation.
32678 // Bits[3] - Match Bit.
32679 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
32680 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
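// Worked example (illustrative): for v4f32 (a single 128-bit lane,
// NumEltsPerLane == 4), mask element M = 5 selects element 1 of V2 and maps
// to Index = (5 % 4) + ((5 / 4) * 4) = 5; bit 2 of each selector
// distinguishes the two sources.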
32681 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
32682 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
32683 SmallVector<int, 8> VPerm2Idx;
32684 unsigned M2ZImm = 0;
32685 for (int M : Mask) {
32686 if (M == SM_SentinelUndef) {
32687 VPerm2Idx.push_back(-1);
32688 continue;
32689 }
32690 if (M == SM_SentinelZero) {
32691 M2ZImm = 2;
32692 VPerm2Idx.push_back(8);
32693 continue;
32694 }
32695 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
32696 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
32697 VPerm2Idx.push_back(Index);
32698 }
32699 V1 = DAG.getBitcast(MaskVT, V1);
32700 V2 = DAG.getBitcast(MaskVT, V2);
32701 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
32702 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
32703 DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
32704 return DAG.getBitcast(RootVT, Res);
32705 }
32707 // If we have 3 or more shuffle instructions or a chain involving a variable
32708 // mask, we can replace them with a single PSHUFB instruction profitably.
32709 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
32710 // instructions, but in practice PSHUFB tends to be *very* fast so we're
32711 // more aggressive.
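// Worked example (illustrative): widening a v4i32 mask {1, 0, Z, 3}
// (Z = SM_SentinelZero) to bytes gives Ratio = 4, so byte i selects source
// byte 4 * Mask[i / 4] + (i % 4), and the four bytes of the zeroed element
// get the constant 255, whose set top bit makes PSHUFB write zeros.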
32712 if (UnaryShuffle && AllowVariableMask &&
32713 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
32714 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
32715 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
32716 SmallVector<SDValue, 16> PSHUFBMask;
32717 int NumBytes = RootVT.getSizeInBits() / 8;
32718 int Ratio = NumBytes / NumMaskElts;
32719 for (int i = 0; i < NumBytes; ++i) {
32720 int M = Mask[i / Ratio];
32721 if (M == SM_SentinelUndef) {
32722 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
32723 continue;
32724 }
32725 if (M == SM_SentinelZero) {
32726 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
32727 continue;
32728 }
32729 M = Ratio * M + i % Ratio;
32730 assert((M / 16) == (i / 16) && "Lane crossing detected");
32731 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32732 }
32733 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
32734 Res = DAG.getBitcast(ByteVT, V1);
32735 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
32736 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
32737 return DAG.getBitcast(RootVT, Res);
32738 }
32740 // With XOP, if we have a 128-bit binary input shuffle we can always combine
32741 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
32742 // slower than PSHUFB on targets that support both.
32743 if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
32744 // VPPERM Mask Operation
32745 // Bits[4:0] - Byte Index (0 - 31)
32746 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
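// Worked example (illustrative): a zeroable lane gets the mask byte 128
// (0x80), i.e. permute operation 4 in bits [7:5], while ordinary lanes keep
// operation 0 and just carry the source byte index in bits [4:0].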
32747 SmallVector<SDValue, 16> VPPERMMask;
32748 int NumBytes = 16;
32749 int Ratio = NumBytes / NumMaskElts;
32750 for (int i = 0; i < NumBytes; ++i) {
32751 int M = Mask[i / Ratio];
32752 if (M == SM_SentinelUndef) {
32753 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
32754 continue;
32755 }
32756 if (M == SM_SentinelZero) {
32757 VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
32758 continue;
32759 }
32760 M = Ratio * M + i % Ratio;
32761 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32762 }
32763 MVT ByteVT = MVT::v16i8;
32764 V1 = DAG.getBitcast(ByteVT, V1);
32765 V2 = DAG.getBitcast(ByteVT, V2);
32766 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
32767 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
32768 return DAG.getBitcast(RootVT, Res);
32769 }
32771 // If that failed and either input is extracted then try to combine as a
32772 // shuffle with the larger type.
32773 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32774 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32775 DAG, Subtarget))
32776 return WideShuffle;
32778 // If we have a dual input shuffle then lower to VPERMV3.
32779 if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32780 ((Subtarget.hasAVX512() &&
32781 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32782 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32783 (Subtarget.hasVLX() &&
32784 (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
32785 MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
32786 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32787 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32788 (Subtarget.hasBWI() && Subtarget.hasVLX() &&
32789 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
32790 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32791 (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
32792 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
32793 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32794 V1 = DAG.getBitcast(MaskVT, V1);
32795 V2 = DAG.getBitcast(MaskVT, V2);
32796 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32797 return DAG.getBitcast(RootVT, Res);
32798 }
32800 // Failed to find any combines.
32801 return SDValue();
32802 }
32804 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
32805 // instruction if possible.
32807 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
32808 // type size to attempt to combine:
32809 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
32810 // -->
32811 // extract_subvector(shuffle(x,y,m2),0)
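// Worked example (illustrative): shuffle(extract_subvector(x, 4), undef,
// {0, 1, 2, 3}) with x : v8f32 becomes a v8f32 shuffle of x with mask
// {4, 5, 6, 7, -1, -1, -1, -1}, after which only the low v4f32 of the
// result is extracted.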
32812 static SDValue combineX86ShuffleChainWithExtract(
32813 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
32814 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32815 const X86Subtarget &Subtarget) {
32816 unsigned NumMaskElts = BaseMask.size();
32817 unsigned NumInputs = Inputs.size();
32818 if (NumInputs == 0)
32819 return SDValue();
32821 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
32822 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
32824 // Peek through subvectors.
32825 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
32826 unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
32827 for (unsigned i = 0; i != NumInputs; ++i) {
32828 SDValue &Src = WideInputs[i];
32829 unsigned &Offset = Offsets[i];
32830 Src = peekThroughBitcasts(Src);
32831 EVT BaseVT = Src.getValueType();
32832 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
32833 isa<ConstantSDNode>(Src.getOperand(1))) {
32834 Offset += Src.getConstantOperandVal(1);
32835 Src = Src.getOperand(0);
32836 }
32837 WideSizeInBits = std::max(WideSizeInBits, Src.getValueSizeInBits());
32838 assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
32839 "Unexpected subvector extraction");
32840 Offset /= BaseVT.getVectorNumElements();
32841 Offset *= NumMaskElts;
32842 }
32844 // Bail if we're always extracting from the lowest subvectors,
32845 // combineX86ShuffleChain should match this for the current width.
32846 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
32847 return SDValue();
32849 EVT RootVT = Root.getValueType();
32850 unsigned RootSizeInBits = RootVT.getSizeInBits();
32851 unsigned Scale = WideSizeInBits / RootSizeInBits;
32852 assert((WideSizeInBits % RootSizeInBits) == 0 &&
32853 "Unexpected subvector extraction");
32855 // If the src vector types aren't the same, see if we can extend
32856 // them to match each other.
32857 // TODO: Support different scalar types?
32858 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
32859 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
32860 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
32861 Op.getValueType().getScalarType() != WideSVT;
32862 }))
32863 return SDValue();
32865 for (SDValue &NewInput : WideInputs) {
32866 assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
32867 "Shuffle vector size mismatch");
32868 if (WideSizeInBits > NewInput.getValueSizeInBits())
32869 NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
32870 SDLoc(NewInput), WideSizeInBits);
32871 assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
32872 "Unexpected subvector extraction");
32875 // Create new mask for larger type.
32876 for (unsigned i = 1; i != NumInputs; ++i)
32877 Offsets[i] += i * Scale * NumMaskElts;
32879 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
32880 for (int &M : WideMask) {
32881 if (M < 0)
32882 continue;
32883 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
32884 }
32885 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
32887 // Remove unused/repeated shuffle source ops.
32888 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
32889 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
32891 if (WideInputs.size() > 2)
32892 return SDValue();
32894 // Increase depth for every upper subvector we've peeked through.
32895 Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
32897 // Attempt to combine wider chain.
32898 // TODO: Can we use a better Root?
32899 SDValue WideRoot = WideInputs[0];
32900 if (SDValue WideShuffle = combineX86ShuffleChain(
32901 WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
32902 AllowVariableMask, DAG, Subtarget)) {
32903 WideShuffle =
32904 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
32905 return DAG.getBitcast(RootVT, WideShuffle);
32906 }
32907 return SDValue();
32908 }
32910 // Attempt to constant fold all of the constant source ops.
32911 // Returns the folded constant if the entire shuffle folds to a constant.
32912 // TODO: Extend this to merge multiple constant Ops and update the mask.
32913 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
32914 ArrayRef<int> Mask, SDValue Root,
32915 bool HasVariableMask,
32916 SelectionDAG &DAG,
32917 const X86Subtarget &Subtarget) {
32918 MVT VT = Root.getSimpleValueType();
32920 unsigned SizeInBits = VT.getSizeInBits();
32921 unsigned NumMaskElts = Mask.size();
32922 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
32923 unsigned NumOps = Ops.size();
32925 // Extract constant bits from each source op.
32926 bool OneUseConstantOp = false;
32927 SmallVector<APInt, 16> UndefEltsOps(NumOps);
32928 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
32929 for (unsigned i = 0; i != NumOps; ++i) {
32930 SDValue SrcOp = Ops[i];
32931 OneUseConstantOp |= SrcOp.hasOneUse();
32932 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
32933 RawBitsOps[i]))
32934 return SDValue();
32935 }
32937 // Only fold if at least one of the constants is only used once or
32938 // the combined shuffle has included a variable mask shuffle, this
32939 // is to avoid constant pool bloat.
32940 if (!OneUseConstantOp && !HasVariableMask)
32941 return SDValue();
32943 // Shuffle the constant bits according to the mask.
32944 APInt UndefElts(NumMaskElts, 0);
32945 APInt ZeroElts(NumMaskElts, 0);
32946 APInt ConstantElts(NumMaskElts, 0);
32947 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
32948 APInt::getNullValue(MaskSizeInBits));
32949 for (unsigned i = 0; i != NumMaskElts; ++i) {
32950 int M = Mask[i];
32951 if (M == SM_SentinelUndef) {
32952 UndefElts.setBit(i);
32953 continue;
32954 } else if (M == SM_SentinelZero) {
32955 ZeroElts.setBit(i);
32956 continue;
32957 }
32958 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
32960 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
32961 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
32963 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
32964 if (SrcUndefElts[SrcMaskIdx]) {
32965 UndefElts.setBit(i);
32966 continue;
32967 }
32969 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
32970 APInt &Bits = SrcEltBits[SrcMaskIdx];
32971 if (!Bits) {
32972 ZeroElts.setBit(i);
32973 continue;
32974 }
32976 ConstantElts.setBit(i);
32977 ConstantBitData[i] = Bits;
32978 }
32979 assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
32981 // Create the constant data.
32982 MVT MaskSVT;
32983 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
32984 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
32985 else
32986 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
32988 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
32990 SDLoc DL(Root);
32991 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
32992 return DAG.getBitcast(VT, CstOp);
32993 }
32995 /// Fully generic combining of x86 shuffle instructions.
32996 ///
32997 /// This should be the last combine run over the x86 shuffle instructions. Once
32998 /// they have been fully optimized, this will recursively consider all chains
32999 /// of single-use shuffle instructions, build a generic model of the cumulative
33000 /// shuffle operation, and check for simpler instructions which implement this
33001 /// operation. We use this primarily for two purposes:
33002 ///
33003 /// 1) Collapse generic shuffles to specialized single instructions when
33004 /// equivalent. In most cases, this is just an encoding size win, but
33005 /// sometimes we will collapse multiple generic shuffles into a single
33006 /// special-purpose shuffle.
33007 /// 2) Look for sequences of shuffle instructions with 3 or more total
33008 /// instructions, and replace them with the slightly more expensive SSSE3
33009 /// PSHUFB instruction if available. We do this as the last combining step
33010 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
33011 /// a suitable short sequence of other instructions. The PSHUFB will either
33012 /// use a register or have to read from memory and so is slightly (but only
33013 /// slightly) more expensive than the other shuffle instructions.
33014 ///
33015 /// Because this is inherently a quadratic operation (for each shuffle in
33016 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
33017 /// This should never be an issue in practice as the shuffle lowering doesn't
33018 /// produce sequences of more than 8 instructions.
33019 ///
33020 /// FIXME: We will currently miss some cases where the redundant shuffling
33021 /// would simplify under the threshold for PSHUFB formation because of
33022 /// combine-ordering. To fix this, we should do the redundant instruction
33023 /// combining in this recursive walk.
33024 static SDValue combineX86ShufflesRecursively(
33025 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
33026 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
33027 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33028 const X86Subtarget &Subtarget) {
33029 // Bound the depth of our recursive combine because this is ultimately
33030 // quadratic in nature.
33031 const unsigned MaxRecursionDepth = 8;
33032 if (Depth >= MaxRecursionDepth)
33033 return SDValue();
33035 // Directly rip through bitcasts to find the underlying operand.
33036 SDValue Op = SrcOps[SrcOpIndex];
33037 Op = peekThroughOneUseBitcasts(Op);
33039 MVT VT = Op.getSimpleValueType();
33040 if (!VT.isVector())
33041 return SDValue(); // Bail if we hit a non-vector.
33043 assert(Root.getSimpleValueType().isVector() &&
33044 "Shuffles operate on vector types!");
33045 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
33046 "Can only combine shuffles of the same vector register size.");
33048 // Extract target shuffle mask and resolve sentinels and inputs.
33049 // TODO - determine Op's demanded elts from RootMask.
33050 SmallVector<int, 64> OpMask;
33051 SmallVector<SDValue, 2> OpInputs;
33052 APInt OpUndef, OpZero;
33053 APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
33054 bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
33055 if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
33056 OpZero, DAG, Depth, false))
33057 return SDValue();
33059 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
33061 // Add the inputs to the Ops list, avoiding duplicates.
33062 SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
33064 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
33065 // Attempt to find an existing match.
33066 SDValue InputBC = peekThroughBitcasts(Input);
33067 for (int i = 0, e = Ops.size(); i < e; ++i)
33068 if (InputBC == peekThroughBitcasts(Ops[i]))
33069 return i;
33070 // Match failed - should we replace an existing Op?
33071 if (InsertionPoint >= 0) {
33072 Ops[InsertionPoint] = Input;
33073 return InsertionPoint;
33074 }
33075 // Add to the end of the Ops list.
33076 Ops.push_back(Input);
33077 return Ops.size() - 1;
33078 };
33080 SmallVector<int, 2> OpInputIdx;
33081 for (SDValue OpInput : OpInputs)
33082 OpInputIdx.push_back(AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
33084 assert(((RootMask.size() > OpMask.size() &&
33085 RootMask.size() % OpMask.size() == 0) ||
33086 (OpMask.size() > RootMask.size() &&
33087 OpMask.size() % RootMask.size() == 0) ||
33088 OpMask.size() == RootMask.size()) &&
33089 "The smaller number of elements must divide the larger.");
33091 // This function can be performance-critical, so we rely on the power-of-2
33092 // knowledge that we have about the mask sizes to replace div/rem ops with
33093 // bit-masks and shifts.
33094 assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
33095 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
33096 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
33097 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
33099 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
33100 unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
33101 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
33102 assert((RootRatio == 1 || OpRatio == 1) &&
33103 "Must not have a ratio for both incoming and op masks!");
33105 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
33106 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
33107 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
33108 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
33109 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
33111 SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);
33113 // Merge this shuffle operation's mask into our accumulated mask. Note that
33114 // this shuffle's mask will be the first applied to the input, followed by the
33115 // root mask to get us all the way to the root value arrangement. The reason
33116 // for this order is that we are recursing up the operation chain.
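// Worked example (illustrative): composing a v4i32 root mask {0, 1, 2, 3}
// with a v2i64 op mask {1, 0} (OpRatio == 2) yields {2, 3, 0, 1}: each
// 64-bit op index is scaled to two 32-bit indices and offset by the
// position within the wider element.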
33117 for (unsigned i = 0; i < MaskWidth; ++i) {
33118 unsigned RootIdx = i >> RootRatioLog2;
33119 if (RootMask[RootIdx] < 0) {
33120 // This is a zero or undef lane, we're done.
33121 Mask[i] = RootMask[RootIdx];
33122 continue;
33123 }
33125 unsigned RootMaskedIdx =
33126 RootRatio == 1
33127 ? RootMask[RootIdx]
33128 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
33130 // Just insert the scaled root mask value if it references an input other
33131 // than the SrcOp we're currently inserting.
33132 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
33133 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
33134 Mask[i] = RootMaskedIdx;
33135 continue;
33136 }
33138 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
33139 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
33140 if (OpMask[OpIdx] < 0) {
33141 // The incoming lanes are zero or undef; it doesn't matter which ones we
33142 // are actually going to pick up.
33143 Mask[i] = OpMask[OpIdx];
33144 continue;
33145 }
33147 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
33148 unsigned OpMaskedIdx =
33149 OpRatio == 1
33150 ? OpMask[OpIdx]
33151 : (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));
33153 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
33154 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
33155 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
33156 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
33158 Mask[i] = OpMaskedIdx;
33159 }
33161 // Remove unused/repeated shuffle source ops.
33162 resolveTargetShuffleInputsAndMask(Ops, Mask);
33164 // Handle the all undef/zero cases early.
33165 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
33166 return DAG.getUNDEF(Root.getValueType());
33168 // TODO - should we handle the mixed zero/undef case as well? Just returning
33169 // a zero mask will lose information on undef elements possibly reducing
33170 // future combine possibilities.
33171 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
33172 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
33173 SDLoc(Root));
33175 assert(!Ops.empty() && "Shuffle with no inputs detected");
33176 HasVariableMask |= IsOpVariableMask;
33178 // Update the list of shuffle nodes that have been combined so far.
33179 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
33180 SrcNodes.end());
33181 CombinedNodes.push_back(Op.getNode());
33183 // See if we can recurse into each shuffle source op (if it's a target
33184 // shuffle). The source op should only be generally combined if it either has
33185 // a single use (i.e. current Op) or all its users have already been combined;
33186 // if not, then we can still combine but should prevent generation of variable
33187 // shuffles to avoid constant pool bloat.
33188 // Don't recurse if we already have more source ops than we can combine in
33189 // the remaining recursion depth.
33190 if (Ops.size() < (MaxRecursionDepth - Depth)) {
33191 for (int i = 0, e = Ops.size(); i < e; ++i) {
33192 bool AllowVar = false;
33193 if (Ops[i].getNode()->hasOneUse() ||
33194 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
33195 AllowVar = AllowVariableMask;
33196 if (SDValue Res = combineX86ShufflesRecursively(
33197 Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
33198 AllowVar, DAG, Subtarget))
33199 return Res;
33200 }
33201 }
33203 // Attempt to constant fold all of the constant source ops.
33204 if (SDValue Cst = combineX86ShufflesConstants(
33205 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
33206 return Cst;
33208 // We can only combine unary and binary shuffle mask cases.
33209 if (Ops.size() <= 2) {
33210 // Minor canonicalization of the accumulated shuffle mask to make it easier
33211 // to match below. All this does is detect masks with sequential pairs of
33212 // elements, and shrink them to the half-width mask. It does this in a loop
33213 // so it will reduce the size of the mask to the minimal width mask which
33214 // performs an equivalent shuffle.
33215 SmallVector<int, 64> WidenedMask;
33216 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
33217 Mask = std::move(WidenedMask);
33218 }
33220 // Canonicalization of binary shuffle masks to improve pattern matching by
33221 // commuting the inputs.
33222 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
33223 ShuffleVectorSDNode::commuteMask(Mask);
33224 std::swap(Ops[0], Ops[1]);
33225 }
33227 // Finally, try to combine into a single shuffle instruction.
33228 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
33229 AllowVariableMask, DAG, Subtarget);
33230 }
33232 // If that failed and any input is extracted then try to combine as a
33233 // shuffle with the larger type.
33234 return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
33235 HasVariableMask, AllowVariableMask,
33236 DAG, Subtarget);
33237 }
33239 /// Helper entry wrapper to combineX86ShufflesRecursively.
33240 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
33241 const X86Subtarget &Subtarget) {
33242 return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
33243 /*HasVarMask*/ false,
33244 /*AllowVarMask*/ true, DAG, Subtarget);
33245 }
33247 /// Get the PSHUF-style mask from PSHUF node.
33248 ///
33249 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
33250 /// PSHUF-style masks that can be reused with such instructions.
33251 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
33252 MVT VT = N.getSimpleValueType();
33253 SmallVector<int, 4> Mask;
33254 SmallVector<SDValue, 2> Ops;
33255 bool IsUnary;
33256 bool HaveMask =
33257 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
33258 (void)HaveMask;
33259 assert(HaveMask);
33261 // If we have more than 128 bits, only the low 128 bits of the shuffle mask
33262 // matter. Check that the upper masks are repeats and remove them.
33263 if (VT.getSizeInBits() > 128) {
33264 int LaneElts = 128 / VT.getScalarSizeInBits();
33265 #ifndef NDEBUG
33266 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
33267 for (int j = 0; j < LaneElts; ++j)
33268 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
33269 "Mask doesn't repeat in high 128-bit lanes!");
33271 Mask.resize(LaneElts);
33272 }
33274 switch (N.getOpcode()) {
33275 case X86ISD::PSHUFD:
33276 return Mask;
33277 case X86ISD::PSHUFLW:
33278 Mask.resize(4);
33279 return Mask;
33280 case X86ISD::PSHUFHW:
33281 Mask.erase(Mask.begin(), Mask.begin() + 4);
33282 for (int &M : Mask)
33283 M -= 4;
33284 return Mask;
33285 default:
33286 llvm_unreachable("No valid shuffle instruction found!");
33287 }
33288 }
33290 /// Search for a combinable shuffle across a chain ending in pshufd.
33291 ///
33292 /// We walk up the chain and look for a combinable shuffle, skipping over
33293 /// shuffles that we could hoist this shuffle's transformation past without
33294 /// altering anything.
33295 static SDValue
33296 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
33297 SelectionDAG &DAG) {
33298 assert(N.getOpcode() == X86ISD::PSHUFD &&
33299 "Called with something other than an x86 128-bit half shuffle!");
33302 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
33303 // of the shuffles in the chain so that we can form a fresh chain to replace
33304 // this one.
33305 SmallVector<SDValue, 8> Chain;
33306 SDValue V = N.getOperand(0);
33307 for (; V.hasOneUse(); V = V.getOperand(0)) {
33308 switch (V.getOpcode()) {
33309 default:
33310 return SDValue(); // Nothing combined!
33312 case ISD::BITCAST:
33313 // Skip bitcasts as we always know the type for the target specific
33314 // instructions.
33315 continue;
33317 case X86ISD::PSHUFD:
33318 // Found another dword shuffle.
33319 break;
33321 case X86ISD::PSHUFLW:
33322 // Check that the low words (being shuffled) are the identity in the
33323 // dword shuffle, and the high words are self-contained.
33324 if (Mask[0] != 0 || Mask[1] != 1 ||
33325 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
33326 return SDValue();
33328 Chain.push_back(V);
33329 continue;
33331 case X86ISD::PSHUFHW:
33332 // Check that the high words (being shuffled) are the identity in the
33333 // dword shuffle, and the low words are self-contained.
33334 if (Mask[2] != 2 || Mask[3] != 3 ||
33335 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
33336 return SDValue();
33338 Chain.push_back(V);
33339 continue;
33341 case X86ISD::UNPCKL:
33342 case X86ISD::UNPCKH:
33343 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
33344 // shuffle into a preceding word shuffle.
33345 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
33346 V.getSimpleValueType().getVectorElementType() != MVT::i16)
33347 return SDValue();
33349 // Search for a half-shuffle which we can combine with.
33350 unsigned CombineOp =
33351 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
33352 if (V.getOperand(0) != V.getOperand(1) ||
33353 !V->isOnlyUserOf(V.getOperand(0).getNode()))
33354 return SDValue();
33355 Chain.push_back(V);
33356 V = V.getOperand(0);
33357 do {
33358 switch (V.getOpcode()) {
33359 default:
33360 return SDValue(); // Nothing to combine.
33362 case X86ISD::PSHUFLW:
33363 case X86ISD::PSHUFHW:
33364 if (V.getOpcode() == CombineOp)
33365 break;
33367 Chain.push_back(V);
33369 LLVM_FALLTHROUGH;
33370 case ISD::BITCAST:
33371 V = V.getOperand(0);
33372 continue;
33373 }
33374 break;
33375 } while (V.hasOneUse());
33376 break;
33377 }
33378 // Break out of the loop if we break out of the switch.
33379 break;
33380 }
33382 if (!V.hasOneUse())
33383 // We fell out of the loop without finding a viable combining instruction.
33384 return SDValue();
33386 // Merge this node's mask and our incoming mask.
33387 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
33388 for (int &M : Mask)
33389 M = VMask[M];
33390 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
33391 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
33393 // Rebuild the chain around this new shuffle.
33394 while (!Chain.empty()) {
33395 SDValue W = Chain.pop_back_val();
33397 if (V.getValueType() != W.getOperand(0).getValueType())
33398 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
33400 switch (W.getOpcode()) {
33401 default:
33402 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
33404 case X86ISD::UNPCKL:
33405 case X86ISD::UNPCKH:
33406 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
33407 break;
33409 case X86ISD::PSHUFD:
33410 case X86ISD::PSHUFLW:
33411 case X86ISD::PSHUFHW:
33412 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
33413 break;
33414 }
33415 }
33416 if (V.getValueType() != N.getValueType())
33417 V = DAG.getBitcast(N.getValueType(), V);
33419 // Return the new chain to replace N.
33420 return V;
33421 }
33423 /// Try to combine x86 target specific shuffles.
33424 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
33425 TargetLowering::DAGCombinerInfo &DCI,
33426 const X86Subtarget &Subtarget) {
33427 SDLoc DL(N);
33428 MVT VT = N.getSimpleValueType();
33429 SmallVector<int, 4> Mask;
33430 unsigned Opcode = N.getOpcode();
33432 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
33433 // single instruction.
33434 if (VT.getScalarSizeInBits() == 64 &&
33435 (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
33436 Opcode == X86ISD::UNPCKL)) {
33437 auto BC0 = peekThroughBitcasts(N.getOperand(0));
33438 auto BC1 = peekThroughBitcasts(N.getOperand(1));
33439 EVT VT0 = BC0.getValueType();
33440 EVT VT1 = BC1.getValueType();
33441 unsigned Opcode0 = BC0.getOpcode();
33442 unsigned Opcode1 = BC1.getOpcode();
33443 if (Opcode0 == Opcode1 && VT0 == VT1 &&
33444 (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
33445 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
33446 Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
33447 SDValue Lo, Hi;
33448 if (Opcode == X86ISD::MOVSD) {
33449 Lo = BC1.getOperand(0);
33450 Hi = BC0.getOperand(1);
33451 } else {
33452 Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
33453 Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
33454 }
33455 SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
33456 return DAG.getBitcast(VT, Horiz);
33457 }
33458 }
33460 switch (Opcode) {
33461 case X86ISD::VBROADCAST: {
33462 SDValue Src = N.getOperand(0);
33463 SDValue BC = peekThroughBitcasts(Src);
33464 EVT SrcVT = Src.getValueType();
33465 EVT BCVT = BC.getValueType();
33467 // If broadcasting from another shuffle, attempt to simplify it.
33468 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
33469 if (isTargetShuffle(BC.getOpcode()) &&
33470 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
33471 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
33472 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
33473 SM_SentinelUndef);
33474 for (unsigned i = 0; i != Scale; ++i)
33475 DemandedMask[i] = i;
33476 if (SDValue Res = combineX86ShufflesRecursively(
33477 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
33478 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
33479 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33480 DAG.getBitcast(SrcVT, Res));
33481 }
33483 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
33484 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
    if (Src.getOpcode() == ISD::BITCAST &&
        SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
                                   VT.getVectorNumElements());
      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
    }

    // Reduce broadcast source vector to lowest 128-bits.
    if (SrcVT.getSizeInBits() > 128)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         extract128BitVector(Src, 0, DAG, DL));

    // broadcast(scalar_to_vector(x)) -> broadcast(x).
    if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));

    // Share broadcast with the longest vector and extract low subvector (free).
    for (SDNode *User : Src->uses())
      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
                                VT.getSizeInBits());
      }

    // vbroadcast(scalarload X) -> vbroadcast_load X
    // For float loads, extract other uses of the scalar from the broadcast.
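    // For an integer scalar the load must have no other uses, but for floats
    // any extra uses can be serviced by extracting lane 0 of the broadcast,
    // which is effectively free.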
    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue BcastLd =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                  LN->getMemoryVT(), LN->getMemOperand());
      // If the load value is used only by N, replace it via CombineTo N.
      bool NoReplaceExtract = Src.hasOneUse();
      DCI.CombineTo(N.getNode(), BcastLd);
      if (NoReplaceExtract) {
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
      } else {
        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
                                  DAG.getIntPtrConstant(0, DL));
        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
      }
      return N; // Return N so it doesn't get rechecked!
    }

    return SDValue();
  }
  case X86ISD::BLENDI: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
    // TODO: Handle MVT::v16i16 repeated blend mask.
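    // e.g. (v4i64 blend (bitcast v8f32 X), (bitcast v8f32 Y), 0b0101)
    //        --> (bitcast (v8f32 blend X, Y, 0b00110011))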
    if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
      MVT SrcVT = N0.getOperand(0).getSimpleValueType();
      if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
          SrcVT.getScalarSizeInBits() >= 32) {
        unsigned BlendMask = N.getConstantOperandVal(2);
        unsigned Size = VT.getVectorNumElements();
        unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
        BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
                            N1.getOperand(0),
                            DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
      }
    }
    return SDValue();
  }
  case X86ISD::VPERMI: {
    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned EltSizeInBits = VT.getScalarSizeInBits();
    if (N0.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
      SDValue Src = N0.getOperand(0);
      EVT SrcVT = Src.getValueType();
      SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
      return DAG.getBitcast(VT, Res);
    }
    return SDValue();
  }
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  case X86ISD::MOVSD:
  case X86ISD::MOVSS: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // Canonicalize scalar FPOps:
    // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
    // If commutable, allow OP(N1[0], N0[0]).
    unsigned Opcode1 = N1.getOpcode();
    if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
        Opcode1 == ISD::FDIV) {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      if (N10 == N0 ||
          (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
        if (N10 != N0)
          std::swap(N10, N11);
        MVT SVT = VT.getVectorElementType();
        SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
        N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
        N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
        SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
        return DAG.getNode(Opcode, DL, VT, N0, SclVec);
      }
    }

    return SDValue();
  }
  case X86ISD::INSERTPS: {
    assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);
    SDValue Op2 = N.getOperand(2);
    unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
    unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
    unsigned ZeroMask = InsertPSMask & 0xF;
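    // The INSERTPS imm8 encodes: bits [7:6] select the source lane of Op1,
    // bits [5:4] select the destination lane in Op0, and bits [3:0] zero out
    // the corresponding result lanes.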

    // If we zero out all elements from Op0 then we don't need to reference it.
    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // If we zero out the element from Op1 then we don't need to reference it.
    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // Attempt to merge insertps Op1 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask1;
    SmallVector<SDValue, 2> Ops1;
    APInt KnownUndef1, KnownZero1;
    if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
                                     KnownZero1)) {
      if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
        // Zero/UNDEF insertion - zero out element and remove dependency.
        InsertPSMask |= (1u << DstIdx);
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
      }
      // Update insertps mask srcidx and reference the source input directly.
      int M = TargetMask1[SrcIdx];
      assert(0 <= M && M < 8 && "Shuffle index out of range");
      InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
      Op1 = Ops1[M < 4 ? 0 : 1];
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // Attempt to merge insertps Op0 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask0;
    SmallVector<SDValue, 2> Ops0;
    APInt KnownUndef0, KnownZero0;
    if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
                                     KnownZero0)) {
      bool Updated = false;
      bool UseInput00 = false;
      bool UseInput01 = false;
      for (int i = 0; i != 4; ++i) {
        if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
          // No change if element is already zero or the inserted element.
          continue;
        } else if (KnownUndef0[i] || KnownZero0[i]) {
          // If the target mask is undef/zero then we must zero the element.
          InsertPSMask |= (1u << i);
          Updated = true;
          continue;
        }

        // The input vector element must be inline.
        int M = TargetMask0[i];
        if (M != i && M != (i + 4))
          return SDValue();

        // Determine which inputs of the target shuffle we're using.
        UseInput00 |= (0 <= M && M < 4);
        UseInput01 |= (4 <= M);
      }

      // If we're not using both inputs of the target shuffle then use the
      // referenced input directly.
      if (UseInput00 && !UseInput01) {
        Updated = true;
        Op0 = Ops0[0];
      } else if (!UseInput00 && UseInput01) {
        Updated = true;
        Op0 = Ops0[1];
      }

      if (Updated)
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // If we're inserting an element from a vbroadcast load, fold the
    // load into the X86insertps instruction. We need to convert the scalar
    // load to a vector and clear the source lane of the INSERTPS control.
    if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
      auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
      if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
        SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
                                   MemIntr->getBasePtr(),
                                   MemIntr->getMemOperand());
        SDValue Insert = DAG.getNode(
            X86ISD::INSERTPS, DL, VT, Op0,
            DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Load),
            DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
        DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
        return Insert;
      }
    }

    return SDValue();
  }
  default:
    return SDValue();
  }

  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return N.getOperand(0);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
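    // e.g. pshuflw {2,3,0,1} swaps dwords 0 and 1, so on the v4i32 bitcast it
    // is simply pshufd {1,0,2,3}.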
    if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
      V = DAG.getBitcast(DVT, V);
      V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
      return DAG.getBitcast(VT, V);
    }

    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
      SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
            makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getBitcast(VT, D.getOperand(0));
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, VT, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
      return NewN;

    break;
  }

  return SDValue();
}

/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
  int ParitySrc[2] = {-1, -1};
  unsigned Size = Mask.size();
  for (unsigned i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Make sure we are using the matching element from the input.
    if ((M % Size) != i)
      return false;

    // Make sure we use the same input for all elements of the same parity.
    int Src = M / Size;
    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
      return false;
    ParitySrc[i % 2] = Src;
  }

  // Make sure each input is used.
  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
    return false;

  Op0Even = ParitySrc[0] == 0;
  return true;
}

/// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
/// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
/// are written to the parameters \p Opnd0 and \p Opnd1.
///
/// We combine shuffle to ADDSUB(SUBADD) directly on the abstract vector shuffle
/// nodes so it is easier to generically match. We also insert dummy vector
/// shuffle nodes for the operands which explicitly discard the lanes which are
/// unused by this operation to try to flow through the rest of the combiner
/// the fact that they're unused.
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
                             bool &IsSubAdd) {
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
      !VT.getSimpleVT().isFloatingPoint())
    return false;

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // Make sure we have an FADD and an FSUB.
  if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
      (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
      V1.getOpcode() == V2.getOpcode())
    return false;

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return false;

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS, RHS;
  if (V1.getOpcode() == ISD::FSUB) {
    LHS = V1->getOperand(0); RHS = V1->getOperand(1);
    if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
        (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
      return false;
  } else {
    assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
    LHS = V2->getOperand(0); RHS = V2->getOperand(1);
    if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
        (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
      return false;
  }

  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return false;

  // It's a subadd if the vector in the even parity is an FADD.
  IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
                     : V2->getOpcode() == ISD::FADD;

  Opnd0 = LHS;
  Opnd1 = RHS;
  return true;
}

/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
static SDValue combineShuffleToFMAddSub(SDNode *N,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
    return SDValue();

  // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue FMAdd = Op0, FMSub = Op1;
  if (FMSub.getOpcode() != X86ISD::FMSUB)
    std::swap(FMAdd, FMSub);

  if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
      FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
      FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
      FMAdd.getOperand(2) != FMSub.getOperand(2))
    return SDValue();

  // Check for correct shuffle mask.
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return SDValue();

  // FMAddSub takes zeroth operand from FMSub node.
  SDLoc DL(N);
  bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
                     FMAdd.getOperand(2));
}

/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
                                                const X86Subtarget &Subtarget,
                                                SelectionDAG &DAG) {
  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
    return V;

  SDValue Opnd0, Opnd1;
  bool IsSubAdd;
  if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  // Try to generate X86ISD::FMADDSUB node here.
  SDValue Opnd2;
  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
  }

  if (IsSubAdd)
    return SDValue();

  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
  // the ADDSUB idiom has been successfully recognized. There are no known
  // X86 targets with 512-bit ADDSUB instructions!
  if (VT.is512BitVector())
    return SDValue();

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}

// We are looking for a shuffle where both sources are concatenated with undef
// and have a width that is half of the output's width. AVX2 has VPERMD/VPERMQ,
// so if we can express this as a single-source shuffle, that's preferable.
static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
    return SDValue();

  EVT VT = N->getValueType(0);

  // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();

  if (VT.getVectorElementType() != MVT::i32 &&
      VT.getVectorElementType() != MVT::i64 &&
      VT.getVectorElementType() != MVT::f32 &&
      VT.getVectorElementType() != MVT::f64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Check that both sources are concats with undef.
  if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
      N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
      !N1.getOperand(1).isUndef())
    return SDValue();

  // Construct the new shuffle mask. Elements from the first source retain
  // their index, but elements from the second source no longer need to skip
  // an undef.
  SmallVector<int, 8> Mask;
  int NumElts = VT.getVectorNumElements();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  for (int Elt : SVOp->getMask())
    Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));

  SDLoc DL(N);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
                               N1.getOperand(0));
  return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}

/// Eliminate a redundant shuffle of a horizontal math op.
static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
    if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
      return SDValue();

  // For a broadcast, peek through an extract element of index 0 to find the
  // horizontal op: broadcast (ext_vec_elt HOp, 0)
  EVT VT = N->getValueType(0);
  if (Opcode == X86ISD::VBROADCAST) {
    SDValue SrcOp = N->getOperand(0);
    if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        SrcOp.getValueType() == MVT::f64 &&
        SrcOp.getOperand(0).getValueType() == VT &&
        isNullConstant(SrcOp.getOperand(1)))
      N = SrcOp.getNode();
  }

  SDValue HOp = N->getOperand(0);
  if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
      HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
    return SDValue();

  // 128-bit horizontal math instructions are defined to operate on adjacent
  // lanes of each operand as:
  //   v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
  // ...similarly for v2f64 and v8i16.
  if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
      HOp.getOperand(0) != HOp.getOperand(1))
    return SDValue();

  // The shuffle that we are eliminating may have allowed the horizontal op to
  // have an undemanded (undefined) operand. Duplicate the other (defined)
  // operand to ensure that the results are defined across all lanes without
  // the shuffle.
  auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
    SDValue X;
    if (HorizOp.getOperand(0).isUndef()) {
      assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
      X = HorizOp.getOperand(1);
    } else if (HorizOp.getOperand(1).isUndef()) {
      assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
      X = HorizOp.getOperand(0);
    } else
      return HorizOp;
    return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
                       HorizOp.getValueType(), X, X);
  };

  // When the operands of a horizontal math op are identical, the low half of
  // the result is the same as the high half. If a target shuffle is also
  // replicating low and high halves (and without changing the type/length of
  // the vector), we don't need the shuffle.
  if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
    if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
      // movddup (hadd X, X) --> hadd X, X
      // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
      assert((HOp.getValueType() == MVT::v2f64 ||
              HOp.getValueType() == MVT::v4f64) &&
             "Unexpected type for h-op");
      return updateHOp(HOp, DAG);
    }
    return SDValue();
  }

  // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();

  // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
  // but this should be tied to whatever horizontal op matching and shuffle
  // canonicalization are producing.
  if (HOp.getValueSizeInBits() == 128 &&
      (isTargetShuffleEquivalent(Mask, {0, 0}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
    return updateHOp(HOp, DAG);

  if (HOp.getValueSizeInBits() == 256 &&
      (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
       isTargetShuffleEquivalent(
           Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
    return updateHOp(HOp, DAG);

  return SDValue();
}

/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
/// low half of each source vector and does not set any high half elements in
/// the destination vector, narrow the shuffle to half its original size.
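/// For example, a v8f32 shuffle with mask <0, 1, 8, 9, u, u, u, u> reads only
/// the low xmm half of each input and leaves the high half undef, so it can
/// be performed as a v4f32 shuffle of the two low halves.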
static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
  if (!Shuf->getValueType(0).isSimple())
    return SDValue();
  MVT VT = Shuf->getSimpleValueType(0);
  if (!VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  // See if we can ignore all of the high elements of the shuffle.
  ArrayRef<int> Mask = Shuf->getMask();
  if (!isUndefUpperHalf(Mask))
    return SDValue();

  // Check if the shuffle mask accesses only the low half of each input vector
  // (half-index output is 0 or 2).
  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(Mask.size() / 2);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
      (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
    return SDValue();

  // Create a half-width shuffle to replace the unnecessarily wide shuffle.
  // The trick is knowing that all of the insert/extract are actually free
  // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
  // of narrow inputs into a narrow output, and that is always cheaper than
  // the wide shuffle that we started with.
  return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
                               Shuf->getOperand(1), HalfMask, HalfIdx1,
                               HalfIdx2, false, DAG, /*UseConcat*/ true);
}

static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
                              const X86Subtarget &Subtarget) {
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
    if (SDValue V = narrowShuffle(Shuf, DAG))
      return V;

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT)) {
    if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
      return AddSub;

    if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
      return HAddSub;
  }

  // Attempt to combine into a vector load/broadcast.
  if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
    return LD;

  // For AVX2, we sometimes want to combine
  //   (vector_shuffle <mask> (concat_vectors t1, undef)
  //                          (concat_vectors t2, undef))
  // into:
  //   (vector_shuffle <mask> (concat_vectors t1, t2), undef)
  // since the latter can be efficiently lowered with VPERMD/VPERMQ.
  if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
    return ShufConcat;

  if (isTargetShuffle(N->getOpcode())) {
    SDValue Op(N, 0);
    if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // Simplify source operands based on shuffle mask.
    // TODO - merge this into combineX86ShufflesRecursively.
    APInt KnownUndef, KnownZero;
    APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
    if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
                                       DCI))
      return SDValue(N, 0);
  }

  // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
  // in the upper 64 bits.
  // TODO: Can we generalize this using computeKnownBits.
  if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
      (VT == MVT::v2f64 || VT == MVT::v2i64) &&
      N->getOperand(0).getOpcode() == ISD::BITCAST &&
      (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
       N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
    SDValue In = N->getOperand(0).getOperand(0);
    switch (In.getOpcode()) {
    default:
      break;
    case X86ISD::CVTP2SI:   case X86ISD::CVTP2UI:
    case X86ISD::MCVTP2SI:  case X86ISD::MCVTP2UI:
    case X86ISD::CVTTP2SI:  case X86ISD::CVTTP2UI:
    case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
    case X86ISD::CVTSI2P:   case X86ISD::CVTUI2P:
    case X86ISD::MCVTSI2P:  case X86ISD::MCVTUI2P:
    case X86ISD::VFPROUND:  case X86ISD::VMFPROUND:
      if (In.getOperand(0).getValueType() == MVT::v2f64 ||
          In.getOperand(0).getValueType() == MVT::v2i64)
        return N->getOperand(0); // return the bitcast
      break;
    }
  }

  // Pull subvector inserts into undef through VZEXT_MOVL by making it an
  // insert into a zero vector. This helps get VZEXT_MOVL closer to
  // scalar_to_vectors where 256/512 are canonicalized to an insert and a
  // 128-bit scalar_to_vector. This reduces the number of isel patterns.
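  //   vzext_movl (insert_subvector undef, X, 0)
  //     --> insert_subvector (zero vector), (vzext_movl X), 0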
  if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
      N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
      N->getOperand(0).hasOneUse() &&
      N->getOperand(0).getOperand(0).isUndef() &&
      isNullConstant(N->getOperand(0).getOperand(2))) {
    SDValue In = N->getOperand(0).getOperand(1);
    SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
                       getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
                       Movl, N->getOperand(0).getOperand(2));
  }

  // If this is a vzmovl of a full vector load, replace it with a vzload,
  // unless the load is volatile.
  if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
      ISD::isNormalLoad(N->getOperand(0).getNode())) {
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    if (LN->isSimple()) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  VT.getVectorElementType(),
                                  LN->getPointerInfo(),
                                  LN->getAlignment(),
                                  MachineMemOperand::MOLoad);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return VZLoad;
    }
  }

  return SDValue();
}

bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
    TargetLoweringOpt &TLO, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  // Handle special case opcodes.
  switch (Opc) {
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;
    // Multiply by zero.
    KnownZero = LHSZero | RHSZero;
    break;
  }
  case X86ISD::VSHL:
  case X86ISD::VSRL:
  case X86ISD::VSRA: {
    // We only need the bottom 64-bits of the (128-bit) shift amount.
    SDValue Amt = Op.getOperand(1);
    MVT AmtVT = Amt.getSimpleValueType();
    assert(AmtVT.is128BitVector() && "Unexpected value type");

    // If we reuse the shift amount just for sse shift amounts then we know
    // that only the bottom 64-bits are ever used.
    bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
      unsigned UseOpc = Use->getOpcode();
      return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
              UseOpc == X86ISD::VSRA) &&
             Use->getOperand(0) != Amt;
    });

    APInt AmtUndef, AmtZero;
    unsigned NumAmtElts = AmtVT.getVectorNumElements();
    APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
    if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
                                   Depth + 1, AssumeSingleUse))
      return true;
    break;
  }
  case X86ISD::VSHLI:
  case X86ISD::VSRLI:
  case X86ISD::VSRAI: {
    SDValue Src = Op.getOperand(0);
    APInt SrcUndef;
    if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;
    // TODO convert SrcUndef to KnownUndef.
    break;
  }
  case X86ISD::KSHIFTL: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
    // single shift. We can do this if the bottom bits (which are shifted
    // out) are never demanded.
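    // e.g. if none of the low 6 elements are demanded:
    //   kshiftl (kshiftr X, 4), 6 --> kshiftl X, 2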
    if (Src.getOpcode() == X86ISD::KSHIFTR) {
      if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTL;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTR;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef <<= ShiftAmt;
    KnownZero <<= ShiftAmt;
    KnownZero.setLowBits(ShiftAmt);
    break;
  }
  case X86ISD::KSHIFTR: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
    // single shift. We can do this if the top bits (which are shifted
    // out) are never demanded.
    if (Src.getOpcode() == X86ISD::KSHIFTL) {
      if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTR;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTL;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef.lshrInPlace(ShiftAmt);
    KnownZero.lshrInPlace(ShiftAmt);
    KnownZero.setHighBits(ShiftAmt);
    break;
  }
  case X86ISD::CVTSI2P:
  case X86ISD::CVTUI2P: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt SrcUndef, SrcZero;
    APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);

    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;

    // Aggressively peek through ops to get at the demanded elts.
    // TODO - we should do this for all target/faux shuffles ops.
    if (!DemandedElts.isAllOnesValue()) {
      APInt DemandedSrcBits =
          APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
      SDValue NewN0 = SimplifyMultipleUseDemandedBits(
          N0, DemandedSrcBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue NewN1 = SimplifyMultipleUseDemandedBits(
          N1, DemandedSrcBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (NewN0 || NewN1) {
        NewN0 = NewN0 ? NewN0 : N0;
        NewN1 = NewN1 ? NewN1 : N1;
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
      }
    }
    break;
  }
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB: {
    APInt DemandedLHS, DemandedRHS;
    getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;
    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;
    break;
  }
  case X86ISD::VTRUNC:
  case X86ISD::VTRUNCS:
  case X86ISD::VTRUNCUS: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);
    break;
  }
  case X86ISD::BLENDV: {
    APInt SelUndef, SelZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
                                   SelZero, TLO, Depth + 1))
      return true;

    // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;

    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;

    KnownZero = LHSZero & RHSZero;
    KnownUndef = LHSUndef & RHSUndef;
    break;
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    if (!SrcVT.isVector())
      return false;
    // Don't bother broadcasting if we just need the 0'th element.
    if (DemandedElts == 1) {
      if (Src.getValueType() != VT)
        Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
                             SDLoc(Op));
      return TLO.CombineTo(Op, Src);
    }
    APInt SrcUndef, SrcZero;
    APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::VPERMV: {
    SDValue Mask = Op.getOperand(0);
    APInt MaskUndef, MaskZero;
    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PSHUFB:
  case X86ISD::VPERMV3:
  case X86ISD::VPERMILPV: {
    SDValue Mask = Op.getOperand(1);
    APInt MaskUndef, MaskZero;
    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::VPPERM:
  case X86ISD::VPERMIL2: {
    SDValue Mask = Op.getOperand(2);
    APInt MaskUndef, MaskZero;
    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  }

  // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
  // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
  // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
  if ((VT.is256BitVector() || VT.is512BitVector()) &&
      DemandedElts.lshr(NumElts / 2) == 0) {
    unsigned SizeInBits = VT.getSizeInBits();
    unsigned ExtSizeInBits = SizeInBits / 2;

    // See if 512-bit ops only use the bottom 128-bits.
    if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
      ExtSizeInBits = SizeInBits / 4;

    switch (Opc) {
    // Zero upper elements.
    case X86ISD::VZEXT_MOVL: {
      SDLoc DL(Op);
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp =
          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    // Subvector broadcast.
    case X86ISD::SUBV_BROADCAST: {
      SDLoc DL(Op);
      SDValue Src = Op.getOperand(0);
      if (Src.getValueSizeInBits() > ExtSizeInBits)
        Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
      else if (Src.getValueSizeInBits() < ExtSizeInBits) {
        MVT SrcSVT = Src.getSimpleValueType().getScalarType();
        MVT SrcVT =
            MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
        Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
      }
      return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
                                               TLO.DAG, DL, ExtSizeInBits));
    }
    // Byte shifts by immediate.
    case X86ISD::VSHLDQ:
    case X86ISD::VSRLDQ:
    // Shift by uniform.
    case X86ISD::VSHL:
    case X86ISD::VSRL:
    case X86ISD::VSRA:
    // Shift by immediate.
    case X86ISD::VSHLI:
    case X86ISD::VSRLI:
    case X86ISD::VSRAI: {
      SDLoc DL(Op);
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp =
          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    case X86ISD::VPERMI: {
      // Simplify PERMPD/PERMQ to extract_subvector.
      // TODO: This should be done in shuffle combining.
      if (VT == MVT::v4f64 || VT == MVT::v4i64) {
        SmallVector<int, 4> Mask;
        DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
        if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
          SDLoc DL(Op);
          SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
          SDValue UndefVec = TLO.DAG.getUNDEF(VT);
          SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
          return TLO.CombineTo(Op, Insert);
        }
      }
      break;
    }
    // Target Shuffles.
    case X86ISD::PSHUFB:
    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
    // Saturated Packs.
    case X86ISD::PACKSS:
    case X86ISD::PACKUS:
    // Horizontal Ops.
    case X86ISD::HADD:
    case X86ISD::HSUB:
    case X86ISD::FHADD:
    case X86ISD::FHSUB: {
      SDLoc DL(Op);
      MVT ExtVT = VT.getSimpleVT();
      ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
                               ExtSizeInBits / ExtVT.getScalarSizeInBits());
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue Ext1 =
          extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    }
  }

  // Get target/faux shuffle mask.
  APInt OpUndef, OpZero;
  SmallVector<int, 64> OpMask;
  SmallVector<SDValue, 2> OpInputs;
  if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
                              OpZero, TLO.DAG, Depth, false))
    return false;

  // Shuffle inputs must be the same size as the result.
  if (OpMask.size() != (unsigned)NumElts ||
      llvm::any_of(OpInputs, [VT](SDValue V) {
        return VT.getSizeInBits() != V.getValueSizeInBits() ||
               !V.getValueType().isVector();
      }))
    return false;

  KnownZero = OpZero;
  KnownUndef = OpUndef;

  // Check if shuffle mask can be simplified to undef/zero/identity.
  int NumSrcs = OpInputs.size();
  for (int i = 0; i != NumElts; ++i)
    if (!DemandedElts[i])
      OpMask[i] = SM_SentinelUndef;

  if (isUndefInRange(OpMask, 0, NumElts)) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }
  if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
    KnownZero.setAllBits();
    return TLO.CombineTo(
        Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
  }
  for (int Src = 0; Src != NumSrcs; ++Src)
    if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));

  // Attempt to simplify inputs.
  for (int Src = 0; Src != NumSrcs; ++Src) {
    // TODO: Support inputs of different types.
    if (OpInputs[Src].getValueType() != VT)
      continue;

    int Lo = Src * NumElts;
    APInt SrcElts = APInt::getNullValue(NumElts);
    for (int i = 0; i != NumElts; ++i)
      if (DemandedElts[i]) {
        int M = OpMask[i] - Lo;
        if (0 <= M && M < NumElts)
          SrcElts.setBit(M);
      }

    // TODO - Propagate input undef/zero elts.
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
                                   TLO, Depth + 1))
      return true;
  }

  return false;
}

bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
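    // e.g. an AND that only clears the upper 32 bits of each element feeding
    // either operand is redundant and can be peeled away.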
    KnownBits KnownOp;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    // FIXME: Can we bound this better?
    APInt DemandedMask = APInt::getLowBitsSet(64, 32);
    if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
                             TLO, Depth + 1))
      return true;
    if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
                             TLO, Depth + 1))
      return true;

    // Aggressively peek through ops to get at the demanded low bits.
    SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
        LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
    SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
        RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
    if (DemandedLHS || DemandedRHS) {
      DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
      DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
      return TLO.CombineTo(
          Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
    }
    break;
  }
  case X86ISD::VSHLI: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
      if (ShiftImm->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = ShiftImm->getZExtValue();
      APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (Op0.getOpcode() == X86ISD::VSRLI &&
          OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
        if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
          if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
            int Diff = ShAmt - Shift2Imm->getZExtValue();
            if (Diff == 0)
              return TLO.CombineTo(Op, Op0.getOperand(0));

            unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
            SDValue NewShift = TLO.DAG.getNode(
                NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
                TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
            return TLO.CombineTo(Op, NewShift);
          }
        }
      }

      if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                               TLO, Depth + 1))
        return true;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;

      // Low bits known zero.
      Known.Zero.setLowBits(ShAmt);
    }
    break;
  }
  case X86ISD::VSRLI: {
    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (ShiftImm->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = ShiftImm->getZExtValue();
      APInt DemandedMask = OriginalDemandedBits << ShAmt;

      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                               OriginalDemandedElts, Known, TLO, Depth + 1))
        return true;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // High bits known zero.
      Known.Zero.setHighBits(ShAmt);
    }
    break;
  }
  case X86ISD::VSRAI: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
      if (ShiftImm->getAPIntValue().uge(BitWidth))
        break;

      unsigned ShAmt = ShiftImm->getZExtValue();
      APInt DemandedMask = OriginalDemandedBits << ShAmt;

      // If we just want the sign bit then we don't need to shift it.
      if (OriginalDemandedBits.isSignMask())
        return TLO.CombineTo(Op, Op0);

      // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
      if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
        SDValue Op00 = Op0.getOperand(0);
        unsigned NumSignBits =
            TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
        if (ShAmt < NumSignBits)
          return TLO.CombineTo(Op, Op00);
      }

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
        DemandedMask.setSignBit();

      if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                               TLO, Depth + 1))
        return true;

      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (Known.Zero[BitWidth - ShAmt - 1] ||
          OriginalDemandedBits.countLeadingZeros() >= ShAmt)
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));

      // High bits are known one.
      if (Known.One[BitWidth - ShAmt - 1])
        Known.One.setHighBits(ShAmt);
    }
    break;
  }
  case X86ISD::PEXTRB:
  case X86ISD::PEXTRW: {
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    MVT VecVT = Vec.getSimpleValueType();
    unsigned NumVecElts = VecVT.getVectorNumElements();

    if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
      unsigned Idx = CIdx->getZExtValue();
      unsigned VecBitWidth = VecVT.getScalarSizeInBits();

      // If we demand no bits from the vector then we must have demanded
      // bits from the implicit zext - simplify to zero.
      APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
      if (DemandedVecBits == 0)
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

      APInt KnownUndef, KnownZero;
      APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      KnownBits KnownVec;
      if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));

      Known = KnownVec.zext(BitWidth, true);
      return false;
    }
    break;
  }
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    MVT VecVT = Vec.getSimpleValueType();

    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      if (!OriginalDemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      KnownBits KnownVec;
      APInt DemandedVecElts(OriginalDemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      KnownBits KnownScl;
      unsigned NumSclBits = Scl.getScalarValueSizeInBits();
      APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
        return true;

      KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
      Known.One = KnownVec.One & KnownScl.One;
      Known.Zero = KnownVec.Zero & KnownScl.Zero;
      return false;
    }
    break;
  }
  case X86ISD::PACKSS:
    // PACKSS saturates to MIN/MAX integer values. So if we just want the
    // sign bit then we can just ask for the source operands sign bit.
    // TODO - add known bits handling.
    if (OriginalDemandedBits.isSignMask()) {
      APInt DemandedLHS, DemandedRHS;
      getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);

      KnownBits KnownLHS, KnownRHS;
      APInt SignMask = APInt::getSignMask(BitWidth * 2);
      if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
                               KnownLHS, TLO, Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
                               KnownRHS, TLO, Depth + 1))
        return true;
    }
    // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
    break;
  case X86ISD::PCMPGT:
    // icmp sgt(0, R) == ashr(R, BitWidth-1).
    // iff we only need the sign bit then we can use R directly.
    if (OriginalDemandedBits.isSignMask() &&
        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
      return TLO.CombineTo(Op, Op.getOperand(1));
    break;
  case X86ISD::MOVMSK: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    unsigned SrcBits = SrcVT.getScalarSizeInBits();
    unsigned NumElts = SrcVT.getVectorNumElements();

    // If we don't need the sign bits at all just return zero.
    if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

    // Only demand the vector elements of the sign bits we need.
    APInt KnownUndef, KnownZero;
    APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
    if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
                                   TLO, Depth + 1))
      return true;

    Known.Zero = KnownZero.zextOrSelf(BitWidth);
    Known.Zero.setHighBits(BitWidth - NumElts);

    // MOVMSK only uses the MSB from each vector element.
    KnownBits KnownSrc;
    if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
                             KnownSrc, TLO, Depth + 1))
      return true;

    if (KnownSrc.One[SrcBits - 1])
      Known.One.setLowBits(NumElts);
    else if (KnownSrc.Zero[SrcBits - 1])
      Known.Zero.setLowBits(NumElts);
    break;
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  switch (Opc) {
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    MVT VecVT = Vec.getSimpleValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  }

  APInt ShuffleUndef, ShuffleZero;
  SmallVector<int, 16> ShuffleMask;
  SmallVector<SDValue, 2> ShuffleOps;
  if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
                             ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    int NumOps = ShuffleOps.size();
    if (ShuffleMask.size() == (unsigned)NumElts &&
        llvm::all_of(ShuffleOps, [VT](SDValue V) {
          return VT.getSizeInBits() == V.getValueSizeInBits();
        })) {

      if (DemandedElts.isSubsetOf(ShuffleUndef))
        return DAG.getUNDEF(VT);
      if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
        return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));

      // Bitmask that indicates which ops have only been accessed 'inline'.
      APInt IdentityOp = APInt::getAllOnesValue(NumOps);
      for (int i = 0; i != NumElts; ++i) {
        int M = ShuffleMask[i];
        if (!DemandedElts[i] || ShuffleUndef[i])
          continue;
        int Op = M / NumElts;
        int Index = M % NumElts;
        if (M < 0 || Index != i) {
          IdentityOp.clearAllBits();
          break;
        }
        IdentityOp &= APInt::getOneBitSet(NumOps, Op);
        if (IdentityOp == 0)
          break;
      }
      assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
             "Multiple identity shuffles detected");

      if (IdentityOp != 0)
        return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
    }
  }

  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
      Op, DemandedBits, DemandedElts, DAG, Depth);
}

/// Check if a vector extract from a target-specific shuffle of a load can be
/// folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue
35061 XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
35062 TargetLowering::DAGCombinerInfo &DCI) {
35063 if (DCI.isBeforeLegalizeOps())
35066 SDValue InVec = N->getOperand(0);
35067 SDValue EltNo = N->getOperand(1);
35068 EVT EltVT = N->getValueType(0);
35070 if (!isa<ConstantSDNode>(EltNo))
35073 EVT OriginalVT = InVec.getValueType();
35074 unsigned NumOriginalElts = OriginalVT.getVectorNumElements();
35076 // Peek through bitcasts, don't duplicate a load with other uses.
35077 InVec = peekThroughOneUseBitcasts(InVec);
35079 EVT CurrentVT = InVec.getValueType();
35080 if (!CurrentVT.isVector())
35083 unsigned NumCurrentElts = CurrentVT.getVectorNumElements();
35084 if ((NumOriginalElts % NumCurrentElts) != 0)
35087 if (!isTargetShuffle(InVec.getOpcode()))
35090 // Don't duplicate a load with other uses.
35091 if (!InVec.hasOneUse())
35094 SmallVector<int, 16> ShuffleMask;
35095 SmallVector<SDValue, 2> ShuffleOps;
35097 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
35098 ShuffleOps, ShuffleMask, UnaryShuffle))
35101 unsigned Scale = NumOriginalElts / NumCurrentElts;
35103 SmallVector<int, 16> ScaledMask;
35104 scaleShuffleMask<int>(Scale, ShuffleMask, ScaledMask);
35105 ShuffleMask = std::move(ScaledMask);
35107 assert(ShuffleMask.size() == NumOriginalElts && "Shuffle mask size mismatch");
35109 // Select the input vector, guarding against out of range extract vector.
35110 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
35111 int Idx = (Elt > (int)NumOriginalElts) ? SM_SentinelUndef : ShuffleMask[Elt];
35113 if (Idx == SM_SentinelZero)
35114 return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
35115 : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
35116 if (Idx == SM_SentinelUndef)
35117 return DAG.getUNDEF(EltVT);
35119 // Bail if any mask element is SM_SentinelZero - getVectorShuffle below
35120 // won't handle it.
35121 if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
35124 assert(0 <= Idx && Idx < (int)(2 * NumOriginalElts) &&
35125 "Shuffle index out of range");
35126 SDValue LdNode = (Idx < (int)NumOriginalElts) ? ShuffleOps[0] : ShuffleOps[1];
35128 // If both inputs to the shuffle are the same operand, allow 2 uses.
35129 unsigned AllowedUses =
35130 (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
35132 if (LdNode.getOpcode() == ISD::BITCAST) {
35133 // Don't duplicate a load with other uses.
35134 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
35137 AllowedUses = 1; // only allow 1 load use if we have a bitcast
35138 LdNode = LdNode.getOperand(0);
35141 if (!ISD::isNormalLoad(LdNode.getNode()))
35144 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
35146 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || !LN0->isSimple())
35149 // If there's a bitcast before the shuffle, check if the load type and
35150 // alignment are valid.
35151 unsigned Align = LN0->getAlignment();
35152 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35153 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
35154 EltVT.getTypeForEVT(*DAG.getContext()));
35156 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
35159 // All checks match so transform back to vector_shuffle so that DAG combiner
35160 // can finish the job.
35163 // Create the shuffle node, taking into account the case that it's a unary shuffle.
35164 SDValue Shuffle = UnaryShuffle ? DAG.getUNDEF(OriginalVT)
35165 : DAG.getBitcast(OriginalVT, ShuffleOps[1]);
35166 Shuffle = DAG.getVectorShuffle(OriginalVT, dl,
35167 DAG.getBitcast(OriginalVT, ShuffleOps[0]),
35168 Shuffle, ShuffleMask);
35169 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
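// Illustrative example of this transform (a sketch, not exhaustive): for
//   (i32 extract_vector_elt (X86ISD::PSHUFD (load p), <2,3,0,1>), 0)
// we rebuild a generic vector_shuffle of the load, which DAGCombiner can then
// fold into a single scalar load of element 2, i.e. a 4-byte load from p + 8.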
35173 // Helper to peek through bitops/setcc to determine size of source vector.
35174 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
35175 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
35176 switch (Src.getOpcode()) {
35177 case ISD::SETCC:
35178 return Src.getOperand(0).getValueSizeInBits() == Size;
35179 case ISD::AND:
35180 case ISD::OR:
35181 case ISD::XOR:
35182 return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
35183 checkBitcastSrcVectorSize(Src.getOperand(1), Size);
35188 // Helper to push sign extension of vXi1 SETCC result through bitops.
35189 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
35190 SDValue Src, const SDLoc &DL) {
35191 switch (Src.getOpcode()) {
35192 case ISD::SETCC:
35193 return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
35194 case ISD::AND:
35195 case ISD::OR:
35196 case ISD::XOR:
35197 return DAG.getNode(
35198 Src.getOpcode(), DL, SExtVT,
35199 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
35200 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
35202 llvm_unreachable("Unexpected node type for vXi1 sign extension");
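// Illustrative example: pushing the extension through an AND of two setccs,
//   (v4i1 and (setcc A, B), (setcc C, D))
// becomes
//   (v4i64 and (sext (setcc A, B)), (sext (setcc C, D)))
// where SExtVT == v4i64 is just an assumed example type; each setcc can then
// produce its wide result directly.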
35205 // Try to match patterns such as
35206 // (i16 bitcast (v16i1 x))
35207 // ->
35208 // (i16 movmsk (v16i8 sext (v16i1 x)))
35209 // before the illegal vector is scalarized on subtargets that don't have legal
35210 // vXi1 types.
35211 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
35213 const X86Subtarget &Subtarget) {
35214 EVT SrcVT = Src.getValueType();
35215 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
35218 // If the input is a truncate from v16i8 or v32i8 go ahead and use a
35219 // movmskb even with avx512. This will be better than truncating to vXi1 and
35220 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
35221 // vpcmpeqb/vpcmpgtb.
35222 bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
35223 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
35224 Src.getOperand(0).getValueType() == MVT::v32i8 ||
35225 Src.getOperand(0).getValueType() == MVT::v64i8);
35227 // With AVX512 vxi1 types are legal and we prefer using k-regs.
35228 // MOVMSK is supported in SSE2 or later.
35229 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
35232 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
35233 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
35234 // v8i16 and v16i16.
35235 // For these two cases, we can shuffle the upper element bytes to a
35236 // consecutive sequence at the start of the vector and treat the results as
35237 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
35238 // for v16i16 this is not the case, because the shuffle is expensive, so we
35239 // avoid sign-extending to this type entirely.
35240 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
35241 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
35242 MVT SExtVT;
35243 bool PropagateSExt = false;
35244 switch (SrcVT.getSimpleVT().SimpleTy) {
35248 SExtVT = MVT::v2i64;
35251 SExtVT = MVT::v4i32;
35252 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
35253 // sign-extend to a 256-bit operation to avoid truncation.
35254 if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
35255 SExtVT = MVT::v4i64;
35256 PropagateSExt = true;
35260 SExtVT = MVT::v8i16;
35261 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
35262 // sign-extend to a 256-bit operation to match the compare.
35263 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
35264 // 256-bit because the shuffle is cheaper than sign extending the result of
35266 if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
35267 checkBitcastSrcVectorSize(Src, 512))) {
35268 SExtVT = MVT::v8i32;
35269 PropagateSExt = true;
35273 SExtVT = MVT::v16i8;
35274 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
35275 // it is not profitable to sign-extend to 256-bit because this will
35276 // require an extra cross-lane shuffle which is more expensive than
35277 // truncating the result of the compare to 128-bits.
35280 SExtVT = MVT::v32i8;
35283 // If we have AVX512F but not AVX512BW, and the input was truncated from
35284 // v64i8 (checked earlier), then split the input and emit two PMOVMSKBs.
35285 if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
35286 SExtVT = MVT::v64i8;
35292 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
35293 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
35295 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
35296 V = getPMOVMSKB(DL, V, DAG, Subtarget);
35297 } else {
35298 if (SExtVT == MVT::v8i16)
35299 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
35300 DAG.getUNDEF(MVT::v8i16));
35301 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
35302 }
35304 EVT IntVT =
35305 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
35306 V = DAG.getZExtOrTrunc(V, DL, IntVT);
35307 return DAG.getBitcast(VT, V);
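// Worked example (illustrative): on a plain SSE2 target,
//   (i16 bitcast (v16i1 setcc (v16i8 a), (v16i8 b), eq))
// becomes
//   (i16 trunc (i32 movmsk (v16i8 sext (v16i1 setcc a, b, eq))))
// where the sign-extended compare result is simply the PCMPEQB output.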
35310 // Convert a vXi1 constant build vector to the same width scalar integer.
35311 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
35312 EVT SrcVT = Op.getValueType();
35313 assert(SrcVT.getVectorElementType() == MVT::i1 &&
35314 "Expected a vXi1 vector");
35315 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
35316 "Expected a constant build vector");
35318 APInt Imm(SrcVT.getVectorNumElements(), 0);
35319 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
35320 SDValue In = Op.getOperand(Idx);
35321 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
35322 Imm.setBit(Idx);
35324 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
35325 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
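// Worked example (illustrative): the constant vector
//   (v4i1 build_vector 1, 0, 1, 1)
// becomes the i4 constant 0b1101, since bit i of the integer holds element i.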
35328 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
35329 TargetLowering::DAGCombinerInfo &DCI,
35330 const X86Subtarget &Subtarget) {
35331 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
35333 if (!DCI.isBeforeLegalizeOps())
35336 // Only do this if we have k-registers.
35337 if (!Subtarget.hasAVX512())
35340 EVT DstVT = N->getValueType(0);
35341 SDValue Op = N->getOperand(0);
35342 EVT SrcVT = Op.getValueType();
35344 if (!Op.hasOneUse())
35347 // Look for logic ops.
35348 if (Op.getOpcode() != ISD::AND &&
35349 Op.getOpcode() != ISD::OR &&
35350 Op.getOpcode() != ISD::XOR)
35353 // Make sure we have a bitcast between mask registers and a scalar type.
35354 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
35355 DstVT.isScalarInteger()) &&
35356 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
35357 SrcVT.isScalarInteger()))
35360 SDValue LHS = Op.getOperand(0);
35361 SDValue RHS = Op.getOperand(1);
35363 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
35364 LHS.getOperand(0).getValueType() == DstVT)
35365 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
35366 DAG.getBitcast(DstVT, RHS));
35368 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
35369 RHS.getOperand(0).getValueType() == DstVT)
35370 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
35371 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
35373 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
35374 // Most of these have to move a constant from the scalar domain anyway.
35375 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
35376 RHS = combinevXi1ConstantToInteger(RHS, DAG);
35377 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
35378 DAG.getBitcast(DstVT, LHS), RHS);
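// Illustrative example: with AVX512, for
//   (v16i1 bitcast (i16 and (i16 bitcast (v16i1 k1)), (i16 bitcast (v16i1 k2))))
// the bitcasts are pushed through the AND, giving (v16i1 and k1, k2) and
// keeping the whole computation in k-registers.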
35384 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
35385 const X86Subtarget &Subtarget) {
35387 unsigned NumElts = BV->getNumOperands();
35388 SDValue Splat = BV->getSplatValue();
35390 // Build MMX element from integer GPR or SSE float values.
35391 auto CreateMMXElement = [&](SDValue V) {
35392 if (V.isUndef())
35393 return DAG.getUNDEF(MVT::x86mmx);
35394 if (V.getValueType().isFloatingPoint()) {
35395 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
35396 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
35397 V = DAG.getBitcast(MVT::v2i64, V);
35398 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
35400 V = DAG.getBitcast(MVT::i32, V);
35401 } else {
35402 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
35404 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
35407 // Convert build vector ops to MMX data in the bottom elements.
35408 SmallVector<SDValue, 8> Ops;
35410 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
35412 if (Splat.isUndef())
35413 return DAG.getUNDEF(MVT::x86mmx);
35415 Splat = CreateMMXElement(Splat);
35417 if (Subtarget.hasSSE1()) {
35418 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
35420 Splat = DAG.getNode(
35421 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
35422 DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
35425 // Use PSHUFW to repeat 16-bit elements.
35426 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
35427 return DAG.getNode(
35428 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
35429 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
35430 Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
35432 Ops.append(NumElts, Splat);
35434 for (unsigned i = 0; i != NumElts; ++i)
35435 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
35438 // Use tree of PUNPCKLs to build up general MMX vector.
35439 while (Ops.size() > 1) {
35440 unsigned NumOps = Ops.size();
35441 unsigned IntrinOp =
35442 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
35443 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
35444 : Intrinsic::x86_mmx_punpcklbw));
35445 SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
35446 for (unsigned i = 0; i != NumOps; i += 2)
35447 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
35448 Ops[i], Ops[i + 1]);
35449 Ops.resize(NumOps / 2);
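// Illustrative example: a v8i8 build vector with elements e0..e7 combines as
//   round 1 (punpcklbw): pairs (e0,e1) (e2,e3) (e4,e5) (e6,e7)
//   round 2 (punpcklwd): quads (e0..e3) (e4..e7)
//   round 3 (punpckldq): one value holding e0..e7 in the low bytes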
35455 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
35456 TargetLowering::DAGCombinerInfo &DCI,
35457 const X86Subtarget &Subtarget) {
35458 SDValue N0 = N->getOperand(0);
35459 EVT VT = N->getValueType(0);
35460 EVT SrcVT = N0.getValueType();
35462 // Try to match patterns such as
35463 // (i16 bitcast (v16i1 x))
35464 // ->
35465 // (i16 movmsk (v16i8 sext (v16i1 x)))
35466 // before the setcc result is scalarized on subtargets that don't have legal
35467 // vXi1 types.
35468 if (DCI.isBeforeLegalize()) {
35470 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
35473 // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
35474 // legalization destroys the v4i32 type.
35475 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
35476 VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&
35477 N0.getOperand(0).getValueType() == MVT::v4i32 &&
35478 ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()) &&
35479 cast<CondCodeSDNode>(N0.getOperand(2))->get() == ISD::SETLT) {
35480 SDValue N00 = N0.getOperand(0);
35481 // Only do this if we can avoid scalarizing the input.
35482 if (ISD::isNormalLoad(N00.getNode()) ||
35483 (N00.getOpcode() == ISD::BITCAST &&
35484 N00.getOperand(0).getValueType() == MVT::v4f32)) {
35485 SDValue V = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
35486 DAG.getBitcast(MVT::v4f32, N00));
35487 return DAG.getZExtOrTrunc(V, dl, VT);
35491 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
35492 // type, widen both sides to avoid a trip through memory.
35493 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
35494 Subtarget.hasAVX512()) {
35495 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
35496 N0 = DAG.getBitcast(MVT::v8i1, N0);
35497 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
35498 DAG.getIntPtrConstant(0, dl));
35501 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
35502 // type, widen both sides to avoid a trip through memory.
35503 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
35504 Subtarget.hasAVX512()) {
35505 // Use zeros for the widening if we already have some zeroes. This can
35506 // allow SimplifyDemandedBits to remove scalar ANDs that may be downstream.
35508 // FIXME: It might make sense to detect a concat_vectors with a mix of
35509 // zeroes and undef and turn it into insert_subvector for i1 vectors as
35510 // a separate combine. What we can't do is canonicalize the operands of
35511 // such a concat or we'll get into a loop with SimplifyDemandedBits.
35512 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
35513 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
35514 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
35515 SrcVT = LastOp.getValueType();
35516 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
35517 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
35518 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
35519 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
35520 N0 = DAG.getBitcast(MVT::i8, N0);
35521 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
35525 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
35526 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
35528 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
35529 N0 = DAG.getBitcast(MVT::i8, N0);
35530 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
35534 // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
35535 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
35536 // due to insert_subvector legalization on KNL. By promoting the copy to i16
35537 // we can help with known bits propagation from the vXi1 domain to the
35538 // scalar domain.
35539 if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
35540 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
35541 N0.getOperand(0).getValueType() == MVT::v16i1 &&
35542 isNullConstant(N0.getOperand(1)))
35543 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
35544 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
35546 // Combine (bitcast (vbroadcast_load)) -> (vbroadcast_load). The memory VT
35547 // determines the number of bits loaded. Remaining bits are zero.
35548 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
35549 VT.getScalarSizeInBits() == SrcVT.getScalarSizeInBits()) {
35550 auto *BCast = cast<MemIntrinsicSDNode>(N0);
35551 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
35552 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
35554 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
35555 VT.getVectorElementType(),
35556 BCast->getMemOperand());
35557 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
35561 // Since MMX types are special and don't usually play with other vector types,
35562 // it's better to handle them early to be sure we emit efficient code by
35563 // avoiding store-load conversions.
35564 if (VT == MVT::x86mmx) {
35565 // Detect MMX constant vectors.
35566 APInt UndefElts;
35567 SmallVector<APInt, 1> EltBits;
35568 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
35570 // Handle zero-extension of i32 with MOVD.
35571 if (EltBits[0].countLeadingZeros() >= 32)
35572 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
35573 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
35574 // Else, bitcast to a double.
35575 // TODO - investigate supporting sext 32-bit immediates on x86_64.
35576 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
35577 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
35580 // Detect bitcasts to x86mmx low word.
35581 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
35582 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
35583 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
35584 bool LowUndef = true, AllUndefOrZero = true;
35585 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
35586 SDValue Op = N0.getOperand(i);
35587 LowUndef &= Op.isUndef() || (i >= e/2);
35588 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
35590 if (AllUndefOrZero) {
35591 SDValue N00 = N0.getOperand(0);
35593 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
35594 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
35595 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
35599 // Detect bitcasts of 64-bit build vectors and convert to a
35600 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
35601 // lowest element.
35602 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
35603 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
35604 SrcVT == MVT::v8i8))
35605 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
35607 // Detect bitcasts between element or subvector extraction to x86mmx.
35608 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
35609 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
35610 isNullConstant(N0.getOperand(1))) {
35611 SDValue N00 = N0.getOperand(0);
35612 if (N00.getValueType().is128BitVector())
35613 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
35614 DAG.getBitcast(MVT::v2i64, N00));
35617 // Detect bitcasts from FP_TO_SINT to x86mmx.
35618 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
35620 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
35621 DAG.getUNDEF(MVT::v2i32));
35622 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
35623 DAG.getBitcast(MVT::v2i64, Res));
35627 // Try to remove a bitcast of constant vXi1 vector. We have to legalize
35628 // most of these to scalar anyway.
35629 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
35630 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
35631 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
35632 return combinevXi1ConstantToInteger(N0, DAG);
35635 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
35636 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
35637 isa<ConstantSDNode>(N0)) {
35638 auto *C = cast<ConstantSDNode>(N0);
35639 if (C->isAllOnesValue())
35640 return DAG.getConstant(1, SDLoc(N0), VT);
35641 if (C->isNullValue())
35642 return DAG.getConstant(0, SDLoc(N0), VT);
35645 // Try to remove bitcasts from input and output of mask arithmetic to
35646 // remove GPR<->K-register crossings.
35647 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
35650 // Convert a bitcasted integer logic operation that has one bitcasted
35651 // floating-point operand into a floating-point logic operation. This may
35652 // create a load of a constant, but that is cheaper than materializing the
35653 // constant in an integer register and transferring it to an SSE register or
35654 // transferring the SSE operand to integer register and back.
35655 unsigned FPOpcode;
35656 switch (N0.getOpcode()) {
35657 case ISD::AND: FPOpcode = X86ISD::FAND; break;
35658 case ISD::OR: FPOpcode = X86ISD::FOR; break;
35659 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
35660 default: return SDValue();
35663 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
35664 (Subtarget.hasSSE2() && VT == MVT::f64)))
35667 SDValue LogicOp0 = N0.getOperand(0);
35668 SDValue LogicOp1 = N0.getOperand(1);
35671 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
35672 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
35673 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
35674 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
35675 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
35676 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
35678 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
35679 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
35680 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
35681 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
35682 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
35683 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
35689 // Given an ABS node, detect the following pattern:
35690 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
35691 // This is useful as it is the input into a SAD pattern.
35692 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
35693 SDValue AbsOp1 = Abs->getOperand(0);
35694 if (AbsOp1.getOpcode() != ISD::SUB)
35697 Op0 = AbsOp1.getOperand(0);
35698 Op1 = AbsOp1.getOperand(1);
35700 // Check if the operands of the sub are zero-extended from vectors of i8.
35701 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
35702 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
35703 Op1.getOpcode() != ISD::ZERO_EXTEND ||
35704 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
35710 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
35712 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
35713 const SDValue &Zext1, const SDLoc &DL,
35714 const X86Subtarget &Subtarget) {
35715 // Find the appropriate width for the PSADBW.
35716 EVT InVT = Zext0.getOperand(0).getValueType();
35717 unsigned RegSize = std::max(128u, InVT.getSizeInBits());
35719 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
35720 // fill in the missing vector elements with 0.
35721 unsigned NumConcat = RegSize / InVT.getSizeInBits();
35722 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
35723 Ops[0] = Zext0.getOperand(0);
35724 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
35725 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35726 Ops[0] = Zext1.getOperand(0);
35727 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35729 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
35730 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
35731 ArrayRef<SDValue> Ops) {
35732 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
35733 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
35735 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
35736 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
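// Illustrative example: for zexts of two v4i8 inputs, both vectors are padded
// to v16i8 with zeros in elements 4..15, so a single PSADBW computes
// sum(|a[i] - b[i]|) for i = 0..3 in element 0 of the v2i64 result; the
// padded elements contribute |0 - 0| = 0.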
35740 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
35741 // PHMINPOSUW.
35742 static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
35743 const X86Subtarget &Subtarget) {
35744 // Bail without SSE41.
35745 if (!Subtarget.hasSSE41())
35748 EVT ExtractVT = Extract->getValueType(0);
35749 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
35752 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
35753 ISD::NodeType BinOp;
35754 SDValue Src = DAG.matchBinOpReduction(
35755 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
35759 EVT SrcVT = Src.getValueType();
35760 EVT SrcSVT = SrcVT.getScalarType();
35761 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
35765 SDValue MinPos = Src;
35767 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
35768 while (SrcVT.getSizeInBits() > 128) {
35769 unsigned NumElts = SrcVT.getVectorNumElements();
35770 unsigned NumSubElts = NumElts / 2;
35771 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
35772 unsigned SubSizeInBits = SrcVT.getSizeInBits();
35773 SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
35774 SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
35775 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
35777 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
35778 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
35779 "Unexpected value type");
35781 // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
35782 // to flip the value accordingly.
35783 SDValue Mask;
35784 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
35785 if (BinOp == ISD::SMAX)
35786 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
35787 else if (BinOp == ISD::SMIN)
35788 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
35789 else if (BinOp == ISD::UMAX)
35790 Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
35792 if (Mask)
35793 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35795 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
35796 // shuffling each upper element down and inserting zeros. This means that the
35797 // v16i8 UMIN will leave the upper elements as zero, performing zero-extension
35798 // ready for the PHMINPOS.
35799 if (ExtractVT == MVT::i8) {
35800 SDValue Upper = DAG.getVectorShuffle(
35801 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
35802 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
35803 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
35806 // Perform the PHMINPOS on a v8i16 vector.
35807 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
35808 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
35809 MinPos = DAG.getBitcast(SrcVT, MinPos);
35811 if (Mask)
35812 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35814 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
35815 DAG.getIntPtrConstant(0, DL));
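// Worked example (illustrative): a UMAX reduction of v8i16 is handled as
// ~umin(~x): XOR the input with all-ones, run PHMINPOSUW, XOR the result
// again, then extract the scalar from element 0.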
35818 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
35819 static SDValue combineHorizontalPredicateResult(SDNode *Extract,
35821 const X86Subtarget &Subtarget) {
35822 // Bail without SSE2.
35823 if (!Subtarget.hasSSE2())
35826 EVT ExtractVT = Extract->getValueType(0);
35827 unsigned BitWidth = ExtractVT.getSizeInBits();
35828 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
35829 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
35832 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
35833 ISD::NodeType BinOp;
35834 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
35835 if (!Match && ExtractVT == MVT::i1)
35836 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
35840 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
35841 // which we can't support here for now.
35842 if (Match.getScalarValueSizeInBits() != BitWidth)
35845 SDValue Movmsk;
35847 EVT MatchVT = Match.getValueType();
35848 unsigned NumElts = MatchVT.getVectorNumElements();
35849 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
35850 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35852 if (ExtractVT == MVT::i1) {
35853 // Special case for (pre-legalization) vXi1 reductions.
35854 if (NumElts > 64 || !isPowerOf2_32(NumElts))
35856 if (TLI.isTypeLegal(MatchVT)) {
35857 // If this is a legal AVX512 predicate type then we can just bitcast.
35858 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35859 Movmsk = DAG.getBitcast(MovmskVT, Match);
35861 // Use combineBitcastvxi1 to create the MOVMSK.
35862 while (NumElts > MaxElts) {
35863 SDValue Lo, Hi;
35864 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35865 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35866 NumElts /= 2;
35867 }
35868 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35869 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
35873 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
35875 // Bail with AVX512VL (which uses predicate registers).
35876 if (Subtarget.hasVLX())
35879 unsigned MatchSizeInBits = Match.getValueSizeInBits();
35880 if (!(MatchSizeInBits == 128 ||
35881 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
35884 // Make sure this isn't a vector of 1 element. The perf win from using
35885 // MOVMSK diminishes with fewer elements in the reduction, but it is
35886 // generally better to get the comparison over to the GPRs as soon as
35887 // possible to reduce the number of vector ops.
35888 if (Match.getValueType().getVectorNumElements() < 2)
35891 // Check that we are extracting a reduction of all sign bits.
35892 if (DAG.ComputeNumSignBits(Match) != BitWidth)
35895 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
35896 SDValue Lo, Hi;
35897 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35898 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35899 MatchSizeInBits = Match.getValueSizeInBits();
35902 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
35903 MVT MaskSrcVT;
35904 if (64 == BitWidth || 32 == BitWidth)
35905 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
35906 MatchSizeInBits / BitWidth);
35908 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
35910 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
35911 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
35912 NumElts = MaskSrcVT.getVectorNumElements();
35914 assert((NumElts <= 32 || NumElts == 64) &&
35915 "Not expecting more than 64 elements");
35917 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
35918 if (BinOp == ISD::XOR) {
35919 // parity -> (AND (CTPOP(MOVMSK X)), 1)
35920 SDValue Mask = DAG.getConstant(1, DL, CmpVT);
35921 SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
35922 Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
35923 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
35926 SDValue CmpC;
35927 ISD::CondCode CondCode;
35928 if (BinOp == ISD::OR) {
35929 // any_of -> MOVMSK != 0
35930 CmpC = DAG.getConstant(0, DL, CmpVT);
35931 CondCode = ISD::CondCode::SETNE;
35933 // all_of -> MOVMSK == ((1 << NumElts) - 1)
35934 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
35936 CondCode = ISD::CondCode::SETEQ;
35939 // The setcc produces an i8 of 0/1, so extend that to the result width and
35940 // negate to get the final 0/-1 mask value.
35942 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
35943 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
35944 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
35945 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
35946 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
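// Worked example (illustrative): an all_of reduction of a v4i32 compare
// result on SSE2 becomes
//   (i32 sub 0, (zext (setcc (movmskps (v4f32 bitcast Cond)), 15, eq)))
// where 15 == (1 << 4) - 1 covers the four sign bits.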
35949 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
35950 const X86Subtarget &Subtarget) {
35951 // PSADBW is only supported on SSE2 and up.
35952 if (!Subtarget.hasSSE2())
35955 // Verify the type we're extracting from is an integer type wider than i16.
35956 EVT VT = Extract->getOperand(0).getValueType();
35957 if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
35960 unsigned RegSize = 128;
35961 if (Subtarget.useBWIRegs())
35962 RegSize = 512;
35963 else if (Subtarget.hasAVX())
35964 RegSize = 256;
35966 // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
35967 // TODO: We should be able to handle larger vectors by splitting them before
35968 // feeding them into several SADs, and then reducing over those.
35969 if (RegSize / VT.getVectorNumElements() < 8)
35972 // Match shuffle + add pyramid.
35973 ISD::NodeType BinOp;
35974 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
35976 // The operand is expected to be zero extended from i8
35977 // (verified in detectZextAbsDiff).
35978 // In order to convert to i64 and above, additional any/zero/sign
35979 // extend is expected.
35980 // The zero extend from 32 bits has no mathematical effect on the result.
35981 // Also, the sign extend is effectively a zero extend
35982 // (it extends the sign bit, which is zero),
35983 // so it is correct to skip the sign/zero extend instruction.
35984 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
35985 Root.getOpcode() == ISD::ZERO_EXTEND ||
35986 Root.getOpcode() == ISD::ANY_EXTEND))
35987 Root = Root.getOperand(0);
35989 // If there was a match, we want Root to be an ABS node that is the root of
35990 // an abs-diff pattern.
35991 if (!Root || Root.getOpcode() != ISD::ABS)
35994 // Check whether we have an abs-diff pattern feeding into the select.
35995 SDValue Zext0, Zext1;
35996 if (!detectZextAbsDiff(Root, Zext0, Zext1))
35999 // Create the SAD instruction.
36001 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
36003 // If the original vector was wider than 8 elements, sum over the results
36004 // in the SAD vector.
36005 unsigned Stages = Log2_32(VT.getVectorNumElements());
36006 MVT SadVT = SAD.getSimpleValueType();
36008 unsigned SadElems = SadVT.getVectorNumElements();
36010 for (unsigned i = Stages - 3; i > 0; --i) {
36011 SmallVector<int, 16> Mask(SadElems, -1);
36012 for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
36013 Mask[j] = MaskEnd + j;
36016 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
36017 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
36021 MVT Type = Extract->getSimpleValueType(0);
36022 unsigned TypeSizeInBits = Type.getSizeInBits();
36023 // Return the lowest TypeSizeInBits bits.
36024 MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
36025 SAD = DAG.getBitcast(ResVT, SAD);
36026 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
36027 Extract->getOperand(1));
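// Illustrative example: reducing 16 absolute differences zext'd to v16i32
// takes a single PSADBW, which leaves two partial sums in the v2i64 result;
// Stages == 4, so the loop above runs once to shuffle element 1 down and add
// it to element 0 before the final scalar extract.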
36030 // Attempt to peek through a target shuffle and extract the scalar from the
36032 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
36033 TargetLowering::DAGCombinerInfo &DCI,
36034 const X86Subtarget &Subtarget) {
36035 if (DCI.isBeforeLegalizeOps())
36039 SDValue Src = N->getOperand(0);
36040 SDValue Idx = N->getOperand(1);
36042 EVT VT = N->getValueType(0);
36043 EVT SrcVT = Src.getValueType();
36044 EVT SrcSVT = SrcVT.getVectorElementType();
36045 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36047 // Don't attempt this for boolean mask vectors or unknown extraction indices.
36048 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
36051 SDValue SrcBC = peekThroughBitcasts(Src);
36053 // Handle extract(broadcast(scalar_value)); it doesn't matter what the index is.
36054 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
36055 SDValue SrcOp = SrcBC.getOperand(0);
36056 if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
36057 return DAG.getBitcast(VT, SrcOp);
36060 // If we're extracting a single element from a broadcast load and there are
36061 // no other users, just create a single load.
36062 if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
36063 auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
36064 unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
36065 if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
36066 VT.getSizeInBits() == SrcBCWidth) {
36067 SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
36068 MemIntr->getBasePtr(),
36069 MemIntr->getPointerInfo(),
36070 MemIntr->getAlignment(),
36071 MemIntr->getMemOperand()->getFlags());
36072 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
36077 // Handle extract(truncate(x)) for the 0th index.
36078 // TODO: Treat this as a faux shuffle?
36079 // TODO: When can we use this for general indices?
36080 if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() &&
36081 isNullConstant(Idx)) {
36082 Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
36083 Src = DAG.getBitcast(SrcVT, Src);
36084 return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
36087 // Resolve the target shuffle inputs and mask.
36088 SmallVector<int, 16> Mask;
36089 SmallVector<SDValue, 2> Ops;
36090 if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
36093 // Attempt to narrow/widen the shuffle mask to the correct size.
36094 if (Mask.size() != NumSrcElts) {
36095 if ((NumSrcElts % Mask.size()) == 0) {
36096 SmallVector<int, 16> ScaledMask;
36097 int Scale = NumSrcElts / Mask.size();
36098 scaleShuffleMask<int>(Scale, Mask, ScaledMask);
36099 Mask = std::move(ScaledMask);
36100 } else if ((Mask.size() % NumSrcElts) == 0) {
36101 // Simplify Mask based on demanded element.
36102 int ExtractIdx = (int)N->getConstantOperandVal(1);
36103 int Scale = Mask.size() / NumSrcElts;
36104 int Lo = Scale * ExtractIdx;
36105 int Hi = Scale * (ExtractIdx + 1);
36106 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
36107 if (i < Lo || Hi <= i)
36108 Mask[i] = SM_SentinelUndef;
36110 SmallVector<int, 16> WidenedMask;
36111 while (Mask.size() > NumSrcElts &&
36112 canWidenShuffleElements(Mask, WidenedMask))
36113 Mask = std::move(WidenedMask);
36114 // TODO - investigate support for wider shuffle masks with known upper
36115 // undef/zero elements for implicit zero-extension.
36119 // Check if narrowing/widening failed.
36120 if (Mask.size() != NumSrcElts)
36123 int SrcIdx = Mask[N->getConstantOperandVal(1)];
36125 // If the shuffle source element is undef/zero then we can just accept it.
36126 if (SrcIdx == SM_SentinelUndef)
36127 return DAG.getUNDEF(VT);
36129 if (SrcIdx == SM_SentinelZero)
36130 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
36131 : DAG.getConstant(0, dl, VT);
36133 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
36134 SrcIdx = SrcIdx % Mask.size();
36136 // We can only extract other elements from 128-bit vectors and in certain
36137 // circumstances, depending on SSE-level.
36138 // TODO: Investigate using extract_subvector for larger vectors.
36139 // TODO: Investigate float/double extraction if it will be just stored.
36140 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
36141 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
36142 assert(SrcSVT == VT && "Unexpected extraction type");
36143 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
36144 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
36145 DAG.getIntPtrConstant(SrcIdx, dl));
36148 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
36149 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
36150 assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
36151 "Unexpected extraction type");
36152 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
36153 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
36154 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
36155 DAG.getIntPtrConstant(SrcIdx, dl));
36156 return DAG.getZExtOrTrunc(ExtOp, dl, VT);
36162 /// Extracting a scalar FP value from vector element 0 is free, so extract each
36163 /// operand first, then perform the math as a scalar op.
36164 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
36165 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
36166 SDValue Vec = ExtElt->getOperand(0);
36167 SDValue Index = ExtElt->getOperand(1);
36168 EVT VT = ExtElt->getValueType(0);
36169 EVT VecVT = Vec.getValueType();
36171 // TODO: If this is a unary/expensive/expand op, allow extraction from a
36172 // non-zero element because the shuffle+scalar op will be cheaper?
36173 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
36176 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
36177 // extract, the condition code), so deal with those as a special-case.
36178 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
36179 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
36180 if (OpVT != MVT::f32 && OpVT != MVT::f64)
36183 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
36185 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
36186 Vec.getOperand(0), Index);
36187 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
36188 Vec.getOperand(1), Index);
36189 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
36192 if (VT != MVT::f32 && VT != MVT::f64)
36195 // Vector FP selects don't fit the pattern of FP math ops (because the
36196 // condition has a different type and we have to change the opcode), so deal
36197 // with those here.
36198 // FIXME: This is restricted to pre type legalization by ensuring the setcc
36199 // has i1 elements. If we loosen this we need to convert vector bool to a
36200 // scalar bool.
36201 if (Vec.getOpcode() == ISD::VSELECT &&
36202 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
36203 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
36204 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
36205 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
36207 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
36208 Vec.getOperand(0).getValueType().getScalarType(),
36209 Vec.getOperand(0), Index);
36210 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
36211 Vec.getOperand(1), Index);
36212 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
36213 Vec.getOperand(2), Index);
36214 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
36217 // TODO: This switch could include FNEG and the x86-specific FP logic ops
36218 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
36219 // missed load folding and fma+fneg combining.
36220 switch (Vec.getOpcode()) {
36221 case ISD::FMA: // Begin 3 operands
36223 case ISD::FADD: // Begin 2 operands
36228 case ISD::FCOPYSIGN:
36231 case ISD::FMINNUM_IEEE:
36232 case ISD::FMAXNUM_IEEE:
36233 case ISD::FMAXIMUM:
36234 case ISD::FMINIMUM:
36237 case ISD::FABS: // Begin 1 operand
36242 case ISD::FNEARBYINT:
36246 case X86ISD::FRSQRT: {
36247 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
36249 SmallVector<SDValue, 4> ExtOps;
36250 for (SDValue Op : Vec->ops())
36251 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
36252 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
36257 llvm_unreachable("All opcodes should return within switch");
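// Illustrative example of the generic case above:
//   (f32 extract_vector_elt (v4f32 fadd X, Y), 0)
// is rewritten as
//   (f32 fadd (extract_vector_elt X, 0), (extract_vector_elt Y, 0))
// since extracting a scalar FP value from vector element 0 is free.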
36260 /// Try to convert a vector reduction sequence composed of binops and shuffles
36261 /// into horizontal ops.
36262 static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
36263 const X86Subtarget &Subtarget) {
36264 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
36266 ISD::NodeType Opc;
36267 SDValue Rdx =
36268 DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
36272 SDValue Index = ExtElt->getOperand(1);
36273 assert(isNullConstant(Index) &&
36274 "Reduction doesn't end in an extract from index 0");
36276 EVT VT = ExtElt->getValueType(0);
36277 EVT VecVT = Rdx.getValueType();
36278 if (VecVT.getScalarType() != VT)
36283 // vXi8 reduction - sub 128-bit vector.
36284 if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
36285 if (VecVT == MVT::v4i8) {
36287 if (Subtarget.hasSSE41()) {
36288 Rdx = DAG.getBitcast(MVT::i32, Rdx);
36289 Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
36290 DAG.getConstant(0, DL, MVT::v4i32), Rdx,
36291 DAG.getIntPtrConstant(0, DL));
36292 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
36294 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
36295 DAG.getConstant(0, DL, VecVT));
36298 if (Rdx.getValueType() == MVT::v8i8) {
36300 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
36301 DAG.getUNDEF(MVT::v8i8));
36303 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
36304 DAG.getConstant(0, DL, MVT::v16i8));
36305 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
36306 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
36309 // Must be a >=128-bit vector with pow2 elements.
36310 if ((VecVT.getSizeInBits() % 128) != 0 ||
36311 !isPowerOf2_32(VecVT.getVectorNumElements()))
36314 // vXi8 reduction - sum lo/hi halves then use PSADBW.
36315 if (VT == MVT::i8) {
36316 while (Rdx.getValueSizeInBits() > 128) {
36317 unsigned HalfSize = VecVT.getSizeInBits() / 2;
36318 unsigned HalfElts = VecVT.getVectorNumElements() / 2;
36319 SDValue Lo = extractSubVector(Rdx, 0, DAG, DL, HalfSize);
36320 SDValue Hi = extractSubVector(Rdx, HalfElts, DAG, DL, HalfSize);
36321 Rdx = DAG.getNode(ISD::ADD, DL, Lo.getValueType(), Lo, Hi);
36322 VecVT = Rdx.getValueType();
36324 assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
36326 SDValue Hi = DAG.getVectorShuffle(
36327 MVT::v16i8, DL, Rdx, Rdx,
36328 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
36329 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
36330 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
36331 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
36332 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
36333 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
36336 // Only use (F)HADD opcodes if they aren't microcoded or we're minimizing code size.
36337 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
36338 if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
36341 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
36343 // 256-bit horizontal instructions operate on 128-bit chunks rather than
36344 // across the whole vector, so we need an extract + hop preliminary stage.
36345 // This is the only step where the operands of the hop are not the same value.
36346 // TODO: We could extend this to handle 512-bit or even longer vectors.
36347 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
36348 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
36349 unsigned NumElts = VecVT.getVectorNumElements();
36350 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
36351 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
36352 Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
36353 VecVT = Rdx.getValueType();
36355 if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
36356 !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
36359 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
36360 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
36361 for (unsigned i = 0; i != ReductionSteps; ++i)
36362 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
36364 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
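// Worked example (illustrative): a v4f32 fadd reduction on a fast-hops
// target becomes two HADDPS ops:
//   t1 = fhadd X, X   ; t1 = <x0+x1, x2+x3, x0+x1, x2+x3>
//   t2 = fhadd t1, t1 ; element 0 == x0+x1+x2+x3
// followed by an extract of element 0.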
36367 /// Detect vector gather/scatter index generation and convert it from being a
36368 /// bunch of shuffles and extracts into a somewhat faster sequence.
36369 /// For i686, the best sequence is apparently storing the value and loading
36370 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
36371 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
36372 TargetLowering::DAGCombinerInfo &DCI,
36373 const X86Subtarget &Subtarget) {
36374 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
36377 SDValue InputVector = N->getOperand(0);
36378 SDValue EltIdx = N->getOperand(1);
36379 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
36381 EVT SrcVT = InputVector.getValueType();
36382 EVT VT = N->getValueType(0);
36383 SDLoc dl(InputVector);
36384 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
36386 if (CIdx && CIdx->getAPIntValue().uge(SrcVT.getVectorNumElements()))
36387 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
36389 // Integer Constant Folding.
36390 if (CIdx && VT.isInteger()) {
36391 APInt UndefVecElts;
36392 SmallVector<APInt, 16> EltBits;
36393 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
36394 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
36395 EltBits, true, false)) {
36396 uint64_t Idx = CIdx->getZExtValue();
36397 if (UndefVecElts[Idx])
36398 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
36399 return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
36405 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36406 if (TLI.SimplifyDemandedBits(
36407 SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
36408 return SDValue(N, 0);
36410 // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
36411 if ((InputVector.getOpcode() == X86ISD::PINSRB ||
36412 InputVector.getOpcode() == X86ISD::PINSRW) &&
36413 InputVector.getOperand(2) == EltIdx) {
36414 assert(SrcVT == InputVector.getOperand(0).getValueType() &&
36415 "Vector type mismatch");
36416 SDValue Scl = InputVector.getOperand(1);
36417 Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
36418 return DAG.getZExtOrTrunc(Scl, dl, VT);
36421 // TODO - Remove this once we can handle the implicit zero-extension of
36422 // X86ISD::PEXTRW/X86ISD::PEXTRB in XFormVExtractWithShuffleIntoLoad,
36423 // combineHorizontalPredicateResult and combineBasicSADPattern.
36427 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
36430 // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
36431 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
36432 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
36433 SDValue MMXSrc = InputVector.getOperand(0);
36435 // The bitcast source is a direct mmx result.
36436 if (MMXSrc.getValueType() == MVT::x86mmx)
36437 return DAG.getBitcast(VT, InputVector);
36440 // Detect mmx to i32 conversion through a v2i32 elt extract.
36441 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
36442 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
36443 SDValue MMXSrc = InputVector.getOperand(0);
36445 // The bitcast source is a direct mmx result.
36446 if (MMXSrc.getValueType() == MVT::x86mmx)
36447 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
36450 // Check whether this extract is the root of a sum of absolute differences
36451 // pattern. This has to be done here because we really want it to happen
36452 // pre-legalization.
36453 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
36456 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
36457 if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
36460 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
36461 if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
36464 if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
36467 if (SDValue V = scalarizeExtEltFP(N, DAG))
36470 // Attempt to extract an i1 element by using MOVMSK to extract the sign bits
36471 // and then testing the relevant element.
36472 if (CIdx && SrcVT.getScalarType() == MVT::i1) {
36473 SmallVector<SDNode *, 16> BoolExtracts;
36474 auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
36475 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
36476 isa<ConstantSDNode>(Use->getOperand(1)) &&
36477 Use->getValueType(0) == MVT::i1) {
36478 BoolExtracts.push_back(Use);
36483 if (all_of(InputVector->uses(), IsBoolExtract) &&
36484 BoolExtracts.size() > 1) {
36485 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36486 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
36488 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
36489 for (SDNode *Use : BoolExtracts) {
36490 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
36491 unsigned MaskIdx = Use->getConstantOperandVal(1);
36492 APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
36493 SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
36494 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
36495 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
36496 DCI.CombineTo(Use, Res);
36498 return SDValue(N, 0);
36506 /// If a vector select has an operand that is -1 or 0, try to simplify the
36507 /// select to a bitwise logic operation.
36508 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
36509 static SDValue
36510 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
36511 TargetLowering::DAGCombinerInfo &DCI,
36512 const X86Subtarget &Subtarget) {
36513 SDValue Cond = N->getOperand(0);
36514 SDValue LHS = N->getOperand(1);
36515 SDValue RHS = N->getOperand(2);
36516 EVT VT = LHS.getValueType();
36517 EVT CondVT = Cond.getValueType();
36519 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36521 if (N->getOpcode() != ISD::VSELECT)
36524 assert(CondVT.isVector() && "Vector select expects a vector selector!");
36526 // Check if the first operand is all zeros and Cond type is vXi1.
36527 // This situation only applies to avx512.
36528 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
36529 // TODO: Can we assert that both operands are not zeros (because that should
36530 // get simplified at node creation time)?
36531 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
36532 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
36534 // If both inputs are 0/undef, create a complete zero vector.
36535 // FIXME: As noted above this should be handled by DAGCombiner/getNode.
36536 if (TValIsAllZeros && FValIsAllZeros) {
36537 if (VT.isFloatingPoint())
36538 return DAG.getConstantFP(0.0, DL, VT);
36539 return DAG.getConstant(0, DL, VT);
36542 if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
36543 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
36544 // Invert the cond to not(cond) : xor(op,allones)=not(op)
36545 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
36546 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
36547 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
36550 // To use the condition operand as a bitwise mask, it must have elements that
36551 // are the same size as the select elements. I.e., the condition operand must
36552 // have already been promoted from the IR select condition type <N x i1>.
36553 // Don't check if the types themselves are equal because that excludes
36554 // vector floating-point selects.
36555 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
36558 // Try to invert the condition if true value is not all 1s and false value is
36559 // not all 0s. Only do this if the condition has one use.
36560 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
36561 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
36562 // Check if the selector will be produced by CMPP*/PCMP*.
36563 Cond.getOpcode() == ISD::SETCC &&
36564 // Check if SETCC has already been promoted.
36565 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
36567 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
36569 if (TValIsAllZeros || FValIsAllOnes) {
36570 SDValue CC = Cond.getOperand(2);
36571 ISD::CondCode NewCC =
36572 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
36573 Cond.getOperand(0).getValueType().isInteger());
36574 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
36576 std::swap(LHS, RHS);
36577 TValIsAllOnes = FValIsAllOnes;
36578 FValIsAllZeros = TValIsAllZeros;
36582 // Cond value must be 'sign splat' to be converted to a logical op.
36583 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
36586 // vselect Cond, 111..., 000... -> Cond
36587 if (TValIsAllOnes && FValIsAllZeros)
36588 return DAG.getBitcast(VT, Cond);
36590 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
36593 // vselect Cond, 111..., X -> or Cond, X
36594 if (TValIsAllOnes) {
36595 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
36596 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
36597 return DAG.getBitcast(VT, Or);
36600 // vselect Cond, X, 000... -> and Cond, X
36601 if (FValIsAllZeros) {
36602 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
36603 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
36604 return DAG.getBitcast(VT, And);
36607 // vselect Cond, 000..., X -> andn Cond, X
36608 if (TValIsAllZeros) {
36609 MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
36610 SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
36611 SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
36612 SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
36613 return DAG.getBitcast(VT, AndN);
36619 /// If both arms of a vector select are concatenated vectors, split the select,
36620 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
36621 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
36622 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
36623 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
36624 const X86Subtarget &Subtarget) {
36625 unsigned Opcode = N->getOpcode();
36626 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
36627 return SDValue();
36629 // TODO: Split 512-bit vectors too?
36630 EVT VT = N->getValueType(0);
36631 if (!VT.is256BitVector())
36632 return SDValue();
36634 // TODO: Split as long as any 2 of the 3 operands are concatenated?
36635 SDValue Cond = N->getOperand(0);
36636 SDValue TVal = N->getOperand(1);
36637 SDValue FVal = N->getOperand(2);
36638 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
36639 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
36640 !collectConcatOps(TVal.getNode(), CatOpsT) ||
36641 !collectConcatOps(FVal.getNode(), CatOpsF))
36642 return SDValue();
36644 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
36645 ArrayRef<SDValue> Ops) {
36646 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
36647 };
36648 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
36649 makeBlend, /*CheckBWI*/ false);
36650 }
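// Worked example (illustrative, added commentary): a v8i32 vselect whose arms
// are (concat_vectors v4i32 A0, A1) and (concat_vectors v4i32 B0, B1) becomes
//   concat (vselect Cond.lo, A0, B0), (vselect Cond.hi, A1, B1)
// so one 256-bit blend is replaced by two 128-bit blends, eliminating the
// wide vector instruction as the function comment describes.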
36652 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
36653 SDValue Cond = N->getOperand(0);
36654 SDValue LHS = N->getOperand(1);
36655 SDValue RHS = N->getOperand(2);
36658 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
36659 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
36660 if (!TrueC || !FalseC)
36661 return SDValue();
36663 // Don't do this for crazy integer types.
36664 EVT VT = N->getValueType(0);
36665 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
36666 return SDValue();
36668 // We're going to use the condition bit in math or logic ops. We could allow
36669 // this with a wider condition value (post-legalization it becomes an i8),
36670 // but if nothing is creating selects that late, it doesn't matter.
36671 if (Cond.getValueType() != MVT::i1)
36672 return SDValue();
36674 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
36675 // 3, 5, or 9 with i32/i64, so those get transformed too.
36676 // TODO: For constants that overflow or do not differ by power-of-2 or small
36677 // multiplier, convert to 'and' + 'add'.
36678 const APInt &TrueVal = TrueC->getAPIntValue();
36679 const APInt &FalseVal = FalseC->getAPIntValue();
36680 bool OV;
36681 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
36682 if (OV)
36683 return SDValue();
36685 APInt AbsDiff = Diff.abs();
36686 if (AbsDiff.isPowerOf2() ||
36687 ((VT == MVT::i32 || VT == MVT::i64) &&
36688 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
36689 SDLoc DL(N);
36690 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
36691 // of the condition can usually be folded into a compare predicate, but even
36692 // without that, the sequence should be cheaper than a CMOV alternative.
36693 if (TrueVal.slt(FalseVal)) {
36694 Cond = DAG.getNOT(DL, Cond, MVT::i1);
36695 std::swap(TrueC, FalseC);
36696 }
36698 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
36699 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
36701 // Multiply condition by the difference if non-one.
36702 if (!AbsDiff.isOneValue())
36703 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
36705 // Add the base if non-zero.
36706 if (!FalseC->isNullValue())
36707 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
36709 return R;
36710 }
36712 return SDValue();
36713 }
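// Worked example (illustrative, values assumed): select Cond, 6, 2 gives
// TC - FC = 4, a power of 2, so the result is (zext(Cond) << 2) + 2 once the
// multiply is strength-reduced to a shift:
//   Cond = 1 -> (1 << 2) + 2 = 6;  Cond = 0 -> (0 << 2) + 2 = 2.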
36715 /// If this is a *dynamic* select (non-constant condition) and we can match
36716 /// this node with one of the variable blend instructions, restructure the
36717 /// condition so that blends can use the high (sign) bit of each element.
36718 /// This function will also call SimplifyDemandedBits on already created
36719 /// BLENDV to perform additional simplifications.
36720 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
36721 TargetLowering::DAGCombinerInfo &DCI,
36722 const X86Subtarget &Subtarget) {
36723 SDValue Cond = N->getOperand(0);
36724 if ((N->getOpcode() != ISD::VSELECT &&
36725 N->getOpcode() != X86ISD::BLENDV) ||
36726 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
36727 return SDValue();
36729 // Don't optimize before the condition has been transformed to a legal type
36730 // and don't ever optimize vector selects that map to AVX512 mask-registers.
36731 unsigned BitWidth = Cond.getScalarValueSizeInBits();
36732 if (BitWidth < 8 || BitWidth > 64)
36733 return SDValue();
36735 // We can only handle the cases where VSELECT is directly legal on the
36736 // subtarget. We custom lower VSELECT nodes with constant conditions and
36737 // this makes it hard to see whether a dynamic VSELECT will correctly
36738 // lower, so we both check the operation's status and explicitly handle the
36739 // cases where a *dynamic* blend will fail even though a constant-condition
36740 // blend could be custom lowered.
36741 // FIXME: We should find a better way to handle this class of problems.
36742 // Potentially, we should combine constant-condition vselect nodes
36743 // pre-legalization into shuffles and not mark as many types as custom
36744 // lowered.
36745 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36746 EVT VT = N->getValueType(0);
36747 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
36748 return SDValue();
36749 // FIXME: We don't support i16-element blends currently. We could and
36750 // should support them by making *all* the bits in the condition be set
36751 // rather than just the high bit and using an i8-element blend.
36752 if (VT.getVectorElementType() == MVT::i16)
36753 return SDValue();
36754 // Dynamic blending was only available from SSE4.1 onward.
36755 if (VT.is128BitVector() && !Subtarget.hasSSE41())
36756 return SDValue();
36757 // Byte blends are only available in AVX2.
36758 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
36759 return SDValue();
36760 // There are no 512-bit blend instructions that use sign bits.
36761 if (VT.is512BitVector())
36762 return SDValue();
36764 // TODO: Add other opcodes eventually lowered into BLEND.
36765 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
36766 UI != UE; ++UI)
36767 if ((UI->getOpcode() != ISD::VSELECT &&
36768 UI->getOpcode() != X86ISD::BLENDV) ||
36769 UI.getOperandNo() != 0)
36770 return SDValue();
36772 APInt DemandedMask(APInt::getSignMask(BitWidth));
36773 KnownBits Known;
36774 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
36775 !DCI.isBeforeLegalizeOps());
36776 if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
36777 return SDValue();
36779 // If we changed the computation somewhere in the DAG, this change will
36780 // affect all users of Cond. Update all the nodes so that we do not use
36781 // the generic VSELECT anymore. Otherwise, we may perform wrong
36782 // optimizations as we messed with the actual expectation for the vector
36783 // boolean values.
36784 for (SDNode *U : Cond->uses()) {
36785 if (U->getOpcode() == X86ISD::BLENDV)
36786 continue;
36788 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
36789 Cond, U->getOperand(1), U->getOperand(2));
36790 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
36791 DCI.AddToWorklist(U);
36792 }
36793 DCI.CommitTargetLoweringOpt(TLO);
36794 return SDValue(N, 0);
36795 }
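// Note (illustrative, added commentary): unlike ISD::VSELECT, the BLENDV
// instructions (e.g. BLENDVPS/PBLENDVB) test only the sign bit of each
// condition element, so asking SimplifyDemandedBits for just the sign mask
// above lets the DAG drop whatever work was producing the ignored low bits.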
36797 /// Do target-specific dag combines on SELECT and VSELECT nodes.
36798 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
36799 TargetLowering::DAGCombinerInfo &DCI,
36800 const X86Subtarget &Subtarget) {
36801 SDLoc DL(N);
36802 SDValue Cond = N->getOperand(0);
36803 SDValue LHS = N->getOperand(1);
36804 SDValue RHS = N->getOperand(2);
36806 // Try simplification again because we use this function to optimize
36807 // BLENDV nodes that are not handled by the generic combiner.
36808 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
36809 return V;
36811 EVT VT = LHS.getValueType();
36812 EVT CondVT = Cond.getValueType();
36813 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36815 // Convert vselects with constant condition into shuffles.
36816 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
36817 DCI.isBeforeLegalizeOps()) {
36818 SmallVector<int, 64> Mask;
36819 if (createShuffleMaskFromVSELECT(Mask, Cond))
36820 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
36821 }
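// For example (illustrative): a v4i32 vselect with constant condition
// <-1,0,0,-1> picks LHS lanes 0 and 3 and RHS lanes 1 and 2, i.e. the
// shuffle mask <0,5,6,3>, with RHS indices offset by the element count.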
36823 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
36824 // instructions match the semantics of the common C idiom x<y?x:y but not
36825 // x<=y?x:y, because of how they handle negative zero (which can be
36826 // ignored in unsafe-math mode).
36827 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
36828 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
36829 VT != MVT::f80 && VT != MVT::f128 &&
36830 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
36831 (Subtarget.hasSSE2() ||
36832 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
36833 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36835 unsigned Opcode = 0;
36836 // Check for x CC y ? x : y.
36837 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
36838 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
36839 switch (CC) {
36840 default: break;
36841 case ISD::SETULT:
36842 // Converting this to a min would handle NaNs incorrectly, and swapping
36843 // the operands would cause it to handle comparisons between positive
36844 // and negative zero incorrectly.
36845 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36846 if (!DAG.getTarget().Options.UnsafeFPMath &&
36847 !(DAG.isKnownNeverZeroFloat(LHS) ||
36848 DAG.isKnownNeverZeroFloat(RHS)))
36849 break;
36850 std::swap(LHS, RHS);
36851 }
36852 Opcode = X86ISD::FMIN;
36853 break;
36854 case ISD::SETOLE:
36855 // Converting this to a min would handle comparisons between positive
36856 // and negative zero incorrectly.
36857 if (!DAG.getTarget().Options.UnsafeFPMath &&
36858 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36859 break;
36860 Opcode = X86ISD::FMIN;
36861 break;
36862 case ISD::SETULE:
36863 // Converting this to a min would handle both negative zeros and NaNs
36864 // incorrectly, but we can swap the operands to fix both.
36865 std::swap(LHS, RHS);
36866 LLVM_FALLTHROUGH;
36867 case ISD::SETOLT:
36868 case ISD::SETLT:
36869 case ISD::SETLE:
36870 Opcode = X86ISD::FMIN;
36871 break;
36873 case ISD::SETOGE:
36874 // Converting this to a max would handle comparisons between positive
36875 // and negative zero incorrectly.
36876 if (!DAG.getTarget().Options.UnsafeFPMath &&
36877 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36878 break;
36879 Opcode = X86ISD::FMAX;
36880 break;
36881 case ISD::SETUGT:
36882 // Converting this to a max would handle NaNs incorrectly, and swapping
36883 // the operands would cause it to handle comparisons between positive
36884 // and negative zero incorrectly.
36885 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36886 if (!DAG.getTarget().Options.UnsafeFPMath &&
36887 !(DAG.isKnownNeverZeroFloat(LHS) ||
36888 DAG.isKnownNeverZeroFloat(RHS)))
36889 break;
36890 std::swap(LHS, RHS);
36891 }
36892 Opcode = X86ISD::FMAX;
36893 break;
36894 case ISD::SETUGE:
36895 // Converting this to a max would handle both negative zeros and NaNs
36896 // incorrectly, but we can swap the operands to fix both.
36897 std::swap(LHS, RHS);
36898 LLVM_FALLTHROUGH;
36899 case ISD::SETOGT:
36900 case ISD::SETGT:
36901 case ISD::SETGE:
36902 Opcode = X86ISD::FMAX;
36903 break;
36904 }
36905 // Check for x CC y ? y : x -- a min/max with reversed arms.
36906 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
36907 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
36908 switch (CC) {
36909 default: break;
36910 case ISD::SETOGE:
36911 // Converting this to a min would handle comparisons between positive
36912 // and negative zero incorrectly, and swapping the operands would
36913 // cause it to handle NaNs incorrectly.
36914 if (!DAG.getTarget().Options.UnsafeFPMath &&
36915 !(DAG.isKnownNeverZeroFloat(LHS) ||
36916 DAG.isKnownNeverZeroFloat(RHS))) {
36917 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36918 break;
36919 std::swap(LHS, RHS);
36920 }
36921 Opcode = X86ISD::FMIN;
36922 break;
36923 case ISD::SETUGT:
36924 // Converting this to a min would handle NaNs incorrectly.
36925 if (!DAG.getTarget().Options.UnsafeFPMath &&
36926 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
36927 break;
36928 Opcode = X86ISD::FMIN;
36929 break;
36930 case ISD::SETUGE:
36931 // Converting this to a min would handle both negative zeros and NaNs
36932 // incorrectly, but we can swap the operands to fix both.
36933 std::swap(LHS, RHS);
36934 LLVM_FALLTHROUGH;
36935 case ISD::SETOGT:
36936 case ISD::SETGT:
36937 case ISD::SETGE:
36938 Opcode = X86ISD::FMIN;
36939 break;
36941 case ISD::SETULT:
36942 // Converting this to a max would handle NaNs incorrectly.
36943 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36944 break;
36945 Opcode = X86ISD::FMAX;
36946 break;
36947 case ISD::SETOLE:
36948 // Converting this to a max would handle comparisons between positive
36949 // and negative zero incorrectly, and swapping the operands would
36950 // cause it to handle NaNs incorrectly.
36951 if (!DAG.getTarget().Options.UnsafeFPMath &&
36952 !DAG.isKnownNeverZeroFloat(LHS) &&
36953 !DAG.isKnownNeverZeroFloat(RHS)) {
36954 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36955 break;
36956 std::swap(LHS, RHS);
36957 }
36958 Opcode = X86ISD::FMAX;
36959 break;
36960 case ISD::SETULE:
36961 // Converting this to a max would handle both negative zeros and NaNs
36962 // incorrectly, but we can swap the operands to fix both.
36963 std::swap(LHS, RHS);
36964 LLVM_FALLTHROUGH;
36965 case ISD::SETOLT:
36966 case ISD::SETLT:
36967 case ISD::SETLE:
36968 Opcode = X86ISD::FMAX;
36969 break;
36970 }
36971 }
36973 if (Opcode)
36974 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
36975 }
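// e.g. (vselect (setolt x, y), x, y) --> (fmin x, y). MINPS/MINSS return
// the second source operand when either input is NaN or when the inputs are
// +0.0 and -0.0, which is why the NaN and signed-zero guards above are
// required before forming FMIN/FMAX. (Illustrative note, added commentary.)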
36977 // Some mask scalar intrinsics rely on checking if only one bit is set
36978 // and implement it in C code like this:
36979 // A[0] = (U & 1) ? A[0] : W[0];
36980 // This creates some redundant instructions that break pattern matching.
36981 // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
36982 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
36983 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
36984 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36985 SDValue AndNode = Cond.getOperand(0);
36986 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
36987 isNullConstant(Cond.getOperand(1)) &&
36988 isOneConstant(AndNode.getOperand(1))) {
36989 // LHS and RHS swapped due to
36990 // setcc outputting 1 when AND resulted in 0 and vice versa.
36991 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
36992 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
36993 }
36994 }
36996 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
36997 // lowering on KNL. In this case we convert it to
36998 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
36999 // The same situation applies to all vectors of i8 and i16 without BWI.
37000 // Make sure we extend these even before type legalization gets a chance to
37001 // split wide vectors.
37002 // Since SKX these selects have a proper lowering.
37003 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
37004 CondVT.getVectorElementType() == MVT::i1 &&
37005 (VT.getVectorElementType() == MVT::i8 ||
37006 VT.getVectorElementType() == MVT::i16)) {
37007 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
37008 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
37009 }
37011 // AVX512 - Extend select with zero to merge with target shuffle.
37012 // select(mask, extract_subvector(shuffle(x)), zero) -->
37013 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
37014 // TODO - support non target shuffles as well.
37015 if (Subtarget.hasAVX512() && CondVT.isVector() &&
37016 CondVT.getVectorElementType() == MVT::i1) {
37017 auto SelectableOp = [&TLI](SDValue Op) {
37018 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
37019 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
37020 isNullConstant(Op.getOperand(1)) &&
37021 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
37022 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
37023 };
37025 bool SelectableLHS = SelectableOp(LHS);
37026 bool SelectableRHS = SelectableOp(RHS);
37027 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
37028 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
37030 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
37031 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
37032 : RHS.getOperand(0).getValueType();
37033 unsigned NumSrcElts = SrcVT.getVectorNumElements();
37034 EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
37035 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
37036 VT.getSizeInBits());
37037 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
37038 VT.getSizeInBits());
37039 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
37040 DAG.getUNDEF(SrcCondVT), Cond,
37041 DAG.getIntPtrConstant(0, DL));
37042 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
37043 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
37044 }
37045 }
37047 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
37048 return V;
37050 // Canonicalize max and min:
37051 // (x > y) ? x : y -> (x >= y) ? x : y
37052 // (x < y) ? x : y -> (x <= y) ? x : y
37053 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
37054 // the need for an extra compare
37055 // against zero. e.g.
37056 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
37057 //   subl   %esi, %edi
37058 //   testl  %edi, %edi
37059 //   movl   $0, %eax
37060 //   cmovgl %edi, %eax
37061 // =>
37062 //   xorl   %eax, %eax
37063 //   subl   %esi, %edi
37064 //   cmovsl %eax, %edi
37065 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
37066 Cond.hasOneUse() &&
37067 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
37068 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
37069 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37070 switch (CC) {
37071 default: break;
37072 case ISD::SETLT:
37073 case ISD::SETGT: {
37074 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
37075 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
37076 Cond.getOperand(0), Cond.getOperand(1), NewCC);
37077 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
37078 }
37079 }
37080 }
37082 // Match VSELECTs into subs with unsigned saturation.
37083 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
37084 // psubus is available in SSE2 for i8 and i16 vectors.
37085 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
37086 isPowerOf2_32(VT.getVectorNumElements()) &&
37087 (VT.getVectorElementType() == MVT::i8 ||
37088 VT.getVectorElementType() == MVT::i16)) {
37089 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37091 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
37092 // left side invert the predicate to simplify logic below.
37093 SDValue Other;
37094 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
37095 Other = RHS;
37096 CC = ISD::getSetCCInverse(CC, true);
37097 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
37098 Other = LHS;
37099 }
37101 if (Other.getNode() && Other->getNumOperands() == 2 &&
37102 Other->getOperand(0) == Cond.getOperand(0)) {
37103 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
37104 SDValue CondRHS = Cond->getOperand(1);
37106 // Look for a general sub with unsigned saturation first.
37107 // x >= y ? x-y : 0 --> subus x, y
37108 // x > y ? x-y : 0 --> subus x, y
37109 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
37110 Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
37111 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
37113 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
37114 if (isa<BuildVectorSDNode>(CondRHS)) {
37115 // If the RHS is a constant we have to reverse the const
37116 // canonicalization.
37117 // x > C-1 ? x+-C : 0 --> subus x, C
37118 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
37119 return (!Op && !Cond) ||
37120 (Op && Cond &&
37121 Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
37122 };
37123 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
37124 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
37125 /*AllowUndefs*/ true)) {
37126 OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
37127 OpRHS);
37128 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
37129 }
37131 // Another special case: If C was a sign bit, the sub has been
37132 // canonicalized into a xor.
37133 // FIXME: Would it be better to use computeKnownBits to determine
37134 // whether it's safe to decanonicalize the xor?
37135 // x s< 0 ? x^C : 0 --> subus x, C
37136 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
37137 if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
37138 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
37139 OpRHSConst->getAPIntValue().isSignMask()) {
37140 // Note that we have to rebuild the RHS constant here to ensure we
37141 // don't rely on particular values of undef lanes.
37142 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
37143 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
37144 }
37145 }
37146 }
37147 }
37148 }
37149 }
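// Worked example (illustrative, C = 16 on v8i16): the IR idiom
//   (x >u 15) ? x + 65520 : 0        ; 65520 == -16 (mod 2^16), 15 == -(-16)-1
// matches the "x > C-1 ? x+-C : 0" pattern above and becomes
//   usubsat x, 16
// e.g. x = 20 -> 4 and x = 10 -> 0 under both forms.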
37151 // Match VSELECTs into add with unsigned saturation.
37152 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
37153 // paddus is available in SSE2 for i8 and i16 vectors.
37154 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
37155 isPowerOf2_32(VT.getVectorNumElements()) &&
37156 (VT.getVectorElementType() == MVT::i8 ||
37157 VT.getVectorElementType() == MVT::i16)) {
37158 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37160 SDValue CondLHS = Cond->getOperand(0);
37161 SDValue CondRHS = Cond->getOperand(1);
37163 // Check if one of the arms of the VSELECT is vector with all bits set.
37164 // If it's on the left side invert the predicate to simplify logic below.
37165 SDValue Other;
37166 if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
37167 Other = RHS;
37168 CC = ISD::getSetCCInverse(CC, true);
37169 } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
37170 Other = LHS;
37171 }
37173 if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
37174 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
37176 // Canonicalize condition operands.
37177 if (CC == ISD::SETUGE) {
37178 std::swap(CondLHS, CondRHS);
37179 CC = ISD::SETULE;
37180 }
37182 // We can test against either of the addition operands.
37183 // x <= x+y ? x+y : ~0 --> addus x, y
37184 // x+y >= x ? x+y : ~0 --> addus x, y
37185 if (CC == ISD::SETULE && Other == CondRHS &&
37186 (OpLHS == CondLHS || OpRHS == CondLHS))
37187 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
37189 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
37190 CondLHS == OpLHS) {
37191 // If the RHS is a constant we have to reverse the const
37192 // canonicalization.
37193 // x > ~C ? x+C : ~0 --> addus x, C
37194 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
37195 return Cond->getAPIntValue() == ~Op->getAPIntValue();
37196 };
37197 if (CC == ISD::SETULE &&
37198 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
37199 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
37200 }
37201 }
37202 }
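// Worked example (illustrative, C = 10 on v8i16): ~C == 0xFFF5, so
//   (x <=u 0xFFF5) ? x + 10 : 0xFFFF
// matches MatchUADDSAT above and becomes uaddsat x, 10: for x = 0xFFF0 both
// forms give 0xFFFA, and for x = 0xFFFA both saturate to 0xFFFF.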
37204 // Early exit check
37205 if (!TLI.isTypeLegal(VT))
37206 return SDValue();
37208 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
37209 return V;
37211 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
37212 return V;
37214 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
37215 return V;
37217 // select(~Cond, X, Y) -> select(Cond, Y, X)
37218 if (CondVT.getScalarType() != MVT::i1)
37219 if (SDValue CondNot = IsNOT(Cond, DAG))
37220 return DAG.getNode(N->getOpcode(), DL, VT,
37221 DAG.getBitcast(CondVT, CondNot), RHS, LHS);
37223 // Custom action for SELECT MMX
37224 if (VT == MVT::x86mmx) {
37225 LHS = DAG.getBitcast(MVT::i64, LHS);
37226 RHS = DAG.getBitcast(MVT::i64, RHS);
37227 SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
37228 return DAG.getBitcast(VT, newSelect);
37229 }
37231 return SDValue();
37232 }
37234 /// Combine:
37235 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
37236 /// to:
37237 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
37238 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
37239 /// Note that this is only legal for some op/cc combinations.
37240 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
37241 SelectionDAG &DAG,
37242 const X86Subtarget &Subtarget) {
37243 // This combine only operates on CMP-like nodes.
37244 if (!(Cmp.getOpcode() == X86ISD::CMP ||
37245 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
37246 return SDValue();
37248 // Can't replace the cmp if it has more uses than the one we're looking at.
37249 // FIXME: We would like to be able to handle this, but would need to make sure
37250 // all uses were updated.
37251 if (!Cmp.hasOneUse())
37252 return SDValue();
37254 // This only applies to variations of the common case:
37255 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
37256 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
37257 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
37258 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
37259 // Using the proper condcodes (see below), overflow is checked for.
37261 // FIXME: We can generalize both constraints:
37262 // - XOR/OR/AND (if they were made to survive AtomicExpand)
37263 // - LHS != 1
37264 // if the result is compared.
37266 SDValue CmpLHS = Cmp.getOperand(0);
37267 SDValue CmpRHS = Cmp.getOperand(1);
37269 if (!CmpLHS.hasOneUse())
37270 return SDValue();
37272 unsigned Opc = CmpLHS.getOpcode();
37273 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
37274 return SDValue();
37276 SDValue OpRHS = CmpLHS.getOperand(2);
37277 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
37278 if (!OpRHSC)
37279 return SDValue();
37281 APInt Addend = OpRHSC->getAPIntValue();
37282 if (Opc == ISD::ATOMIC_LOAD_SUB)
37283 Addend = -Addend;
37285 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
37286 if (!CmpRHSC)
37287 return SDValue();
37289 APInt Comparison = CmpRHSC->getAPIntValue();
37291 // If the addend is the negation of the comparison value, then we can do
37292 // a full comparison by emitting the atomic arithmetic as a locked sub.
37293 if (Comparison == -Addend) {
37294 // The CC is fine, but we need to rewrite the LHS of the comparison as an
37295 // atomic sub.
37296 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
37297 auto AtomicSub = DAG.getAtomic(
37298 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
37299 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
37300 /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
37301 AN->getMemOperand());
37302 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
37303 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
37304 DAG.getUNDEF(CmpLHS.getValueType()));
37305 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
37306 return LockOp;
37307 }
37309 // We can handle comparisons with zero in a number of cases by manipulating
37310 // the CC used.
37311 if (!Comparison.isNullValue())
37312 return SDValue();
37314 if (CC == X86::COND_S && Addend == 1)
37315 CC = X86::COND_LE;
37316 else if (CC == X86::COND_NS && Addend == 1)
37317 CC = X86::COND_G;
37318 else if (CC == X86::COND_G && Addend == -1)
37319 CC = X86::COND_GE;
37320 else if (CC == X86::COND_LE && Addend == -1)
37321 CC = X86::COND_L;
37322 else
37323 return SDValue();
37325 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
37326 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
37327 DAG.getUNDEF(CmpLHS.getValueType()));
37328 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
37329 return LockOp;
37330 }
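// Worked example (illustrative, added commentary): for
//   (seteq (atomic_load_add x, -5), 5)
// the addend (-5) is the negation of the comparison value (5), so the RMW op
// is re-emitted as a locked sub of 5 and the flags of LOCK SUB are used
// directly. For the zero case, (setlt (atomic_load_add x, 1), 0) carries
// COND_S and becomes LOCK ADD plus COND_LE, per the table above.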
37332 // Check whether a boolean test is testing a boolean value generated by
37333 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
37334 // flag.
37336 // Simplify the following patterns:
37337 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
37338 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
37339 // to (Op EFLAGS Cond)
37341 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
37342 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
37343 // to (Op EFLAGS !Cond)
37345 // where Op could be BRCOND or CMOV.
37347 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
37348 // This combine only operates on CMP-like nodes.
37349 if (!(Cmp.getOpcode() == X86ISD::CMP ||
37350 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
37351 return SDValue();
37353 // Quit if not used as a boolean value.
37354 if (CC != X86::COND_E && CC != X86::COND_NE)
37355 return SDValue();
37357 // Check CMP operands. One of them should be 0 or 1 and the other should be
37358 // an SetCC or extended from it.
37359 SDValue Op1 = Cmp.getOperand(0);
37360 SDValue Op2 = Cmp.getOperand(1);
37362 SDValue SetCC;
37363 const ConstantSDNode* C = nullptr;
37364 bool needOppositeCond = (CC == X86::COND_E);
37365 bool checkAgainstTrue = false; // Is it a comparison against 1?
37367 if ((C = dyn_cast<ConstantSDNode>(Op1)))
37368 SetCC = Op2;
37369 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
37370 SetCC = Op1;
37371 else // Quit if all operands are not constants.
37372 return SDValue();
37374 if (C->getZExtValue() == 1) {
37375 needOppositeCond = !needOppositeCond;
37376 checkAgainstTrue = true;
37377 } else if (C->getZExtValue() != 0)
37378 // Quit if the constant is neither 0 or 1.
37379 return SDValue();
37381 bool truncatedToBoolWithAnd = false;
37382 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
37383 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
37384 SetCC.getOpcode() == ISD::TRUNCATE ||
37385 SetCC.getOpcode() == ISD::AND) {
37386 if (SetCC.getOpcode() == ISD::AND) {
37387 int OpIdx = -1;
37388 if (isOneConstant(SetCC.getOperand(0)))
37389 OpIdx = 1;
37390 if (isOneConstant(SetCC.getOperand(1)))
37391 OpIdx = 0;
37392 if (OpIdx < 0)
37393 break;
37394 SetCC = SetCC.getOperand(OpIdx);
37395 truncatedToBoolWithAnd = true;
37396 } else
37397 SetCC = SetCC.getOperand(0);
37398 }
37400 switch (SetCC.getOpcode()) {
37401 case X86ISD::SETCC_CARRY:
37402 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
37403 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
37404 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
37405 // truncated to i1 using 'and'.
37406 if (checkAgainstTrue && !truncatedToBoolWithAnd)
37407 break;
37408 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
37409 "Invalid use of SETCC_CARRY!");
37410 LLVM_FALLTHROUGH;
37411 case X86ISD::SETCC:
37412 // Set the condition code or opposite one if necessary.
37413 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
37414 if (needOppositeCond)
37415 CC = X86::GetOppositeBranchCondition(CC);
37416 return SetCC.getOperand(1);
37417 case X86ISD::CMOV: {
37418 // Check whether false/true value has canonical one, i.e. 0 or 1.
37419 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
37420 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
37421 // Quit if true value is not a constant.
37422 if (!TVal)
37423 return SDValue();
37424 // Quit if false value is not a constant.
37425 if (!FVal) {
37426 SDValue Op = SetCC.getOperand(0);
37427 // Skip 'zext' or 'trunc' node.
37428 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
37429 Op.getOpcode() == ISD::TRUNCATE)
37430 Op = Op.getOperand(0);
37431 // A special case for rdrand/rdseed, where 0 is set if false cond is
37432 // found.
37433 if ((Op.getOpcode() != X86ISD::RDRAND &&
37434 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
37435 return SDValue();
37436 }
37437 // Quit if false value is not the constant 0 or 1.
37438 bool FValIsFalse = true;
37439 if (FVal && FVal->getZExtValue() != 0) {
37440 if (FVal->getZExtValue() != 1)
37441 return SDValue();
37442 // If FVal is 1, opposite cond is needed.
37443 needOppositeCond = !needOppositeCond;
37444 FValIsFalse = false;
37445 }
37446 // Quit if TVal is not the constant opposite of FVal.
37447 if (FValIsFalse && TVal->getZExtValue() != 1)
37448 return SDValue();
37449 if (!FValIsFalse && TVal->getZExtValue() != 0)
37450 return SDValue();
37451 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
37452 if (needOppositeCond)
37453 CC = X86::GetOppositeBranchCondition(CC);
37454 return SetCC.getOperand(3);
37455 }
37456 }
37458 return SDValue();
37459 }
37461 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
37462 /// Match:
37463 /// (X86or (X86setcc) (X86setcc))
37464 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
37465 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
37466 X86::CondCode &CC1, SDValue &Flags,
37467 bool &isAnd) {
37468 if (Cond->getOpcode() == X86ISD::CMP) {
37469 if (!isNullConstant(Cond->getOperand(1)))
37470 return false;
37472 Cond = Cond->getOperand(0);
37473 }
37475 isAnd = false;
37477 SDValue SetCC0, SetCC1;
37478 switch (Cond->getOpcode()) {
37479 default: return false;
37480 case ISD::AND:
37481 case X86ISD::AND:
37482 isAnd = true;
37483 LLVM_FALLTHROUGH;
37484 case ISD::OR:
37485 case X86ISD::OR:
37486 SetCC0 = Cond->getOperand(0);
37487 SetCC1 = Cond->getOperand(1);
37488 break;
37489 }
37491 // Make sure we have SETCC nodes, using the same flags value.
37492 if (SetCC0.getOpcode() != X86ISD::SETCC ||
37493 SetCC1.getOpcode() != X86ISD::SETCC ||
37494 SetCC0->getOperand(1) != SetCC1->getOperand(1))
37495 return false;
37497 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
37498 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
37499 Flags = SetCC0->getOperand(1);
37500 return true;
37501 }
37503 // When legalizing carry, we create carries via add X, -1
37504 // If that comes from an actual carry, via setcc, we use the
37505 // carry directly.
37506 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
37507 if (EFLAGS.getOpcode() == X86ISD::ADD) {
37508 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
37509 SDValue Carry = EFLAGS.getOperand(0);
37510 while (Carry.getOpcode() == ISD::TRUNCATE ||
37511 Carry.getOpcode() == ISD::ZERO_EXTEND ||
37512 Carry.getOpcode() == ISD::SIGN_EXTEND ||
37513 Carry.getOpcode() == ISD::ANY_EXTEND ||
37514 (Carry.getOpcode() == ISD::AND &&
37515 isOneConstant(Carry.getOperand(1))))
37516 Carry = Carry.getOperand(0);
37517 if (Carry.getOpcode() == X86ISD::SETCC ||
37518 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
37519 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
37520 uint64_t CarryCC = Carry.getConstantOperandVal(0);
37521 SDValue CarryOp1 = Carry.getOperand(1);
37522 if (CarryCC == X86::COND_B)
37523 return CarryOp1;
37524 if (CarryCC == X86::COND_A) {
37525 // Try to convert COND_A into COND_B in an attempt to facilitate
37526 // materializing "setb reg".
37528 // Do not flip "e > c", where "c" is a constant, because Cmp
37529 // instruction cannot take an immediate as its first operand.
37531 if (CarryOp1.getOpcode() == X86ISD::SUB &&
37532 CarryOp1.getNode()->hasOneUse() &&
37533 CarryOp1.getValueType().isInteger() &&
37534 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
37535 SDValue SubCommute =
37536 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
37537 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
37538 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
37539 }
37540 }
37541 // If this is a check of the z flag of an add with 1, switch to the
37542 // C flag.
37543 if (CarryCC == X86::COND_E &&
37544 CarryOp1.getOpcode() == X86ISD::ADD &&
37545 isOneConstant(CarryOp1.getOperand(1)))
37546 return CarryOp1;
37547 }
37548 }
37549 }
37551 return SDValue();
37552 }
37554 /// Optimize an EFLAGS definition used according to the condition code \p CC
37555 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
37556 /// uses of chain values.
37557 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
37558 SelectionDAG &DAG,
37559 const X86Subtarget &Subtarget) {
37560 if (CC == X86::COND_B)
37561 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
37562 return Flags;
37564 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
37565 return R;
37566 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
37567 }
37569 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
37570 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
37571 TargetLowering::DAGCombinerInfo &DCI,
37572 const X86Subtarget &Subtarget) {
37574 SDLoc DL(N);
37575 SDValue FalseOp = N->getOperand(0);
37576 SDValue TrueOp = N->getOperand(1);
37577 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
37578 SDValue Cond = N->getOperand(3);
37580 // cmov X, X, ?, ? --> X
37581 if (TrueOp == FalseOp)
37582 return TrueOp;
37584 // Try to simplify the EFLAGS and condition code operands.
37585 // We can't always do this as FCMOV only supports a subset of X86 cond.
37586 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
37587 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
37588 SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
37589 Flags};
37590 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37591 }
37592 }
37594 // If this is a select between two integer constants, try to do some
37595 // optimizations. Note that the operands are ordered the opposite of SELECT
37596 // operands.
37597 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
37598 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
37599 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
37600 // larger than FalseC (the false value).
37601 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
37602 CC = X86::GetOppositeBranchCondition(CC);
37603 std::swap(TrueC, FalseC);
37604 std::swap(TrueOp, FalseOp);
37605 }
37607 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
37608 // This is efficient for any integer data type (including i8/i16) and
37609 // shift amount.
37610 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
37611 Cond = getSETCC(CC, Cond, DL, DAG);
37613 // Zero extend the condition if needed.
37614 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
37616 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
37617 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
37618 DAG.getConstant(ShAmt, DL, MVT::i8));
37619 return Cond;
37620 }
37622 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
37623 // for any integer data type, including i8/i16.
37624 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
37625 Cond = getSETCC(CC, Cond, DL, DAG);
37627 // Zero extend the condition if needed.
37628 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
37629 FalseC->getValueType(0), Cond);
37630 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
37631 SDValue(FalseC, 0));
37632 return Cond;
37633 }
37635 // Optimize cases that will turn into an LEA instruction. This requires
37636 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
37637 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
37638 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
37639 assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
37640 "Implicit constant truncation");
37642 bool isFastMultiplier = false;
37643 if (Diff.ult(10)) {
37644 switch (Diff.getZExtValue()) {
37645 default: break;
37646 case 1: // result = add base, cond
37647 case 2: // result = lea base( , cond*2)
37648 case 3: // result = lea base(cond, cond*2)
37649 case 4: // result = lea base( , cond*4)
37650 case 5: // result = lea base(cond, cond*4)
37651 case 8: // result = lea base( , cond*8)
37652 case 9: // result = lea base(cond, cond*8)
37653 isFastMultiplier = true;
37654 break;
37655 }
37656 }
37658 if (isFastMultiplier) {
37659 Cond = getSETCC(CC, Cond, DL ,DAG);
37660 // Zero extend the condition if needed.
37661 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
37662 Cond);
37663 // Scale the condition by the difference.
37664 if (Diff != 1)
37665 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
37666 DAG.getConstant(Diff, DL, Cond.getValueType()));
37668 // Add the base if non-zero.
37669 if (FalseC->getAPIntValue() != 0)
37670 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
37671 SDValue(FalseC, 0));
37672 return Cond;
37673 }
37674 }
37675 }
37676 }
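// Worked example (illustrative, values assumed): a CMOV of TrueC = 5 and
// FalseC = 2 has Diff = 3, a "fast multiplier", so it lowers to roughly
//   setcc %cl; movzbl %cl, %ecx; leal 2(%rcx,%rcx,2), %eax
// i.e. cond*3 + 2, yielding 5 when the condition holds and 2 otherwise.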
37678 // Handle these cases:
37679 // (select (x != c), e, c) -> select (x != c), e, x),
37680 // (select (x == c), c, e) -> select (x == c), x, e)
37681 // where the c is an integer constant, and the "select" is the combination
37682 // of CMOV and CMP.
37684 // The rationale for this change is that the conditional-move from a constant
37685 // needs two instructions, however, conditional-move from a register needs
37686 // only one instruction.
37688 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
37689 // some instruction-combining opportunities. This opt needs to be
37690 // postponed as late as possible.
37692 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
37693 // the DCI.xxxx conditions are provided to postpone the optimization as
37694 // late as possible.
37696 ConstantSDNode *CmpAgainst = nullptr;
37697 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
37698 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
37699 !isa<ConstantSDNode>(Cond.getOperand(0))) {
37701 if (CC == X86::COND_NE &&
37702 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
37703 CC = X86::GetOppositeBranchCondition(CC);
37704 std::swap(TrueOp, FalseOp);
37705 }
37707 if (CC == X86::COND_E &&
37708 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
37709 SDValue Ops[] = {FalseOp, Cond.getOperand(0),
37710 DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
37711 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37712 }
37713 }
37716 // Fold and/or of setcc's to double CMOV:
37717 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
37718 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
37720 // This combine lets us generate:
37721 // cmovcc1 (jcc1 if we don't have CMOV)
37722 // cmovcc2 (same)
37723 // instead of:
37724 // setcc1
37725 // setcc2
37726 // and/or
37727 // cmovne (jne if we don't have CMOV)
37728 // When we can't use the CMOV instruction, it might increase branch
37729 // mispredicts.
37730 // When we can use CMOV, or when there is no mispredict, this improves
37731 // throughput and reduces register pressure.
37733 if (CC == X86::COND_NE) {
37734 SDValue Flags;
37735 X86::CondCode CC0, CC1;
37736 bool isAndSetCC;
37737 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
37738 if (isAndSetCC) {
37739 std::swap(FalseOp, TrueOp);
37740 CC0 = X86::GetOppositeBranchCondition(CC0);
37741 CC1 = X86::GetOppositeBranchCondition(CC1);
37742 }
37744 SDValue LOps[] = {FalseOp, TrueOp,
37745 DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
37746 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
37747 SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
37748 Flags};
37749 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37750 return CMOV;
37751 }
37752 }
37754 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
37755 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
37756 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
37757 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
37758 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
37759 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
37760 SDValue Add = TrueOp;
37761 SDValue Const = FalseOp;
37762 // Canonicalize the condition code for easier matching and output.
37763 if (CC == X86::COND_E)
37764 std::swap(Add, Const);
37766 // We might have replaced the constant in the cmov with the LHS of the
37767 // compare. If so change it to the RHS of the compare.
37768 if (Const == Cond.getOperand(0))
37769 Const = Cond.getOperand(1);
37771 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
37772 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
37773 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
37774 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
37775 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
37776 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
37777 EVT VT = N->getValueType(0);
37778 // This should constant fold.
37779 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
37780 SDValue CMov =
37781 DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
37782 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
37783 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
37784 }
37785 }
37787 return SDValue();
37788 }
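// Worked example (illustrative, values assumed): with C1 = 5 and C2 = 3,
//   (CMOV 5, (ADD (CTTZ X), 3), (X != 0))
// becomes (ADD (CMOV 2, (CTTZ X), (X != 0)), 3): for X != 0 both forms give
// cttz(X)+3, and for X == 0 both give 2+3 = 5, so the add of C2 is shared by
// both arms of the select.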
37790 /// Different mul shrinking modes.
37791 enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
37793 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
37794 EVT VT = N->getOperand(0).getValueType();
37795 if (VT.getScalarSizeInBits() != 32)
37796 return false;
37798 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
37799 unsigned SignBits[2] = {1, 1};
37800 bool IsPositive[2] = {false, false};
37801 for (unsigned i = 0; i < 2; i++) {
37802 SDValue Opd = N->getOperand(i);
37804 SignBits[i] = DAG.ComputeNumSignBits(Opd);
37805 IsPositive[i] = DAG.SignBitIsZero(Opd);
37806 }
37808 bool AllPositive = IsPositive[0] && IsPositive[1];
37809 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
37810 // When ranges are from -128 ~ 127, use MULS8 mode.
37811 if (MinSignBits >= 25)
37812 Mode = MULS8;
37813 // When ranges are from 0 ~ 255, use MULU8 mode.
37814 else if (AllPositive && MinSignBits >= 24)
37815 Mode = MULU8;
37816 // When ranges are from -32768 ~ 32767, use MULS16 mode.
37817 else if (MinSignBits >= 17)
37818 Mode = MULS16;
37819 // When ranges are from 0 ~ 65535, use MULU16 mode.
37820 else if (AllPositive && MinSignBits >= 16)
37821 Mode = MULU16;
37822 else
37823 return false;
37824 return true;
37825 }
37827 /// When the operands of vector mul are extended from smaller size values,
37828 /// like i8 and i16, the type of mul may be shrunk to generate more
37829 /// efficient code. Two typical patterns are handled:
37830 /// Pattern1:
37831 /// %2 = sext/zext <N x i8> %1 to <N x i32>
37832 /// %4 = sext/zext <N x i8> %3 to <N x i32>
37833 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
37834 /// %5 = mul <N x i32> %2, %4
37835 ///
37836 /// Pattern2:
37837 /// %2 = zext/sext <N x i16> %1 to <N x i32>
37838 /// %4 = zext/sext <N x i16> %3 to <N x i32>
37839 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
37840 /// %5 = mul <N x i32> %2, %4
37841 ///
37842 /// There are four mul shrinking modes:
37843 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
37844 /// -128 to 128, and the scalar value range of %4 is also -128 to 128,
37845 /// generate pmullw+sext32 for it (MULS8 mode).
37846 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
37847 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
37848 /// generate pmullw+zext32 for it (MULU8 mode).
37849 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
37850 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
37851 /// generate pmullw+pmulhw for it (MULS16 mode).
37852 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
37853 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
37854 /// generate pmullw+pmulhuw for it (MULU16 mode).
37855 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
37856 const X86Subtarget &Subtarget) {
37857 // Check for legality
37858 // pmullw/pmulhw are not supported by SSE.
37859 if (!Subtarget.hasSSE2())
37860 return SDValue();
37862 // Check for profitability
37863 // pmulld is supported since SSE41. It is better to use pmulld
37864 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
37865 // the pair of pmullw+pmulhw.
37866 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
37867 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
37868 return SDValue();
37870 ShrinkMode Mode;
37871 if (!canReduceVMulWidth(N, DAG, Mode))
37872 return SDValue();
37874 SDLoc DL(N);
37875 SDValue N0 = N->getOperand(0);
37876 SDValue N1 = N->getOperand(1);
37877 EVT VT = N->getOperand(0).getValueType();
37878 unsigned NumElts = VT.getVectorNumElements();
37879 if ((NumElts % 2) != 0)
37880 return SDValue();
37882 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
37884 // Shrink the operands of mul.
37885 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
37886 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
37888 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
37889 // lower part is needed.
37890 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
37891 if (Mode == MULU8 || Mode == MULS8)
37892 return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
37893 DL, VT, MulLo);
37895 MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
37896 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
37897 // the higher part is also needed.
37898 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
37899 ReducedVT, NewN0, NewN1);
37901 // Repack the lower part and higher part result of mul into a wider
37902 // result.
37903 // Generate shuffle functioning as punpcklwd.
37904 SmallVector<int, 16> ShuffleMask(NumElts);
37905 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37906 ShuffleMask[2 * i] = i;
37907 ShuffleMask[2 * i + 1] = i + NumElts;
37908 }
37909 SDValue ResLo =
37910 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37911 ResLo = DAG.getBitcast(ResVT, ResLo);
37912 // Generate shuffle functioning as punpckhwd.
37913 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37914 ShuffleMask[2 * i] = i + NumElts / 2;
37915 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
37916 }
37917 SDValue ResHi =
37918 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37919 ResHi = DAG.getBitcast(ResVT, ResHi);
37920 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
37921 }
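// Worked example (illustrative, NumElts = 8): MulLo holds the low 16 bits
// and MulHi the high 16 bits of each 32-bit product. The first shuffle
// interleaves lanes <0..3> as <lo0,hi0,lo1,hi1,...> (punpcklwd), so the
// bitcast to v4i32 reassembles the full products of lanes 0-3; the second
// shuffle (punpckhwd) does the same for lanes 4-7 before the concat.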
37923 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
37924 EVT VT, const SDLoc &DL) {
37926 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
37927 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37928 DAG.getConstant(Mult, DL, VT));
37929 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
37930 DAG.getConstant(Shift, DL, MVT::i8));
37931 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37932 N->getOperand(0));
37933 return Result;
37934 };
37936 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
37937 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37938 DAG.getConstant(Mul1, DL, VT));
37939 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
37940 DAG.getConstant(Mul2, DL, VT));
37941 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37942 N->getOperand(0));
37943 return Result;
37944 };
37946 switch (MulAmt) {
37947 default:
37948 break;
37949 case 11:
37950 // mul x, 11 => add ((shl (mul x, 5), 1), x)
37951 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
37952 case 21:
37953 // mul x, 21 => add ((shl (mul x, 5), 2), x)
37954 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
37955 case 41:
37956 // mul x, 41 => add ((shl (mul x, 5), 3), x)
37957 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
37958 case 22:
37959 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
37960 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37961 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
37962 case 19:
37963 // mul x, 19 => add ((shl (mul x, 9), 1), x)
37964 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
37965 case 37:
37966 // mul x, 37 => add ((shl (mul x, 9), 2), x)
37967 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
37968 case 73:
37969 // mul x, 73 => add ((shl (mul x, 9), 3), x)
37970 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
37971 case 13:
37972 // mul x, 13 => add ((shl (mul x, 3), 2), x)
37973 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
37974 case 23:
37975 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
37976 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
37977 case 26:
37978 // mul x, 26 => add ((mul (mul x, 5), 5), x)
37979 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
37980 case 28:
37981 // mul x, 28 => add ((mul (mul x, 9), 3), x)
37982 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
37983 case 29:
37984 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
37985 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37986 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
37987 }
37989 // Another trick. If this is a power of 2 plus 2/4/8, we can use a shift followed
37990 // by a single LEA.
37991 // First check if this a sum of two power of 2s because that's easy. Then
37992 // count how many zeros are up to the first bit.
37993 // TODO: We can do this even without LEA at a cost of two shifts and an add.
37994 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
37995 unsigned ScaleShift = countTrailingZeros(MulAmt);
37996 if (ScaleShift >= 1 && ScaleShift < 4) {
37997 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
37998 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37999 DAG.getConstant(ShiftAmt, DL, MVT::i8));
38000 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38001 DAG.getConstant(ScaleShift, DL, MVT::i8));
38002 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
38003 }
38004 }
38006 return SDValue();
38007 }
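// Worked example (illustrative): MulAmt = 20 = 16 + 4. Then
// MulAmt & (MulAmt - 1) = 16 is a power of 2, ScaleShift = cttz(20) = 2,
// and ShiftAmt = log2(16) = 4, so the multiply becomes (x << 4) + (x << 2);
// the (x << 2) term can fold into an LEA's scale-4 index.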
38009 // If the upper 17 bits of each element are zero then we can use PMADDWD,
38010 // which is always at least as quick as PMULLD, except on KNL.
38011 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
38012 const X86Subtarget &Subtarget) {
38013 if (!Subtarget.hasSSE2())
38014 return SDValue();
38016 if (Subtarget.isPMADDWDSlow())
38017 return SDValue();
38019 EVT VT = N->getValueType(0);
38021 // Only support vXi32 vectors.
38022 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
38023 return SDValue();
38025 // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
38026 // Also allow v2i32 if it will be widened.
38027 MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
38028 if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(WVT))
38029 return SDValue();
38031 SDValue N0 = N->getOperand(0);
38032 SDValue N1 = N->getOperand(1);
38034 // If we are zero extending two steps without SSE4.1, it's better to reduce
38035 // the vmul width instead.
38036 if (!Subtarget.hasSSE41() &&
38037 (N0.getOpcode() == ISD::ZERO_EXTEND &&
38038 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
38039 (N1.getOpcode() == ISD::ZERO_EXTEND &&
38040 N1.getOperand(0).getScalarValueSizeInBits() <= 8))
38041 return SDValue();
38043 APInt Mask17 = APInt::getHighBitsSet(32, 17);
38044 if (!DAG.MaskedValueIsZero(N1, Mask17) ||
38045 !DAG.MaskedValueIsZero(N0, Mask17))
38046 return SDValue();
38048 // Use SplitOpsAndApply to handle AVX splitting.
38049 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38050 ArrayRef<SDValue> Ops) {
38051 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
38052 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
38053 };
38054 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
38055 { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
38056 PMADDWDBuilder);
38057 }
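// Sketch of why this is safe (illustrative, added commentary): if the upper
// 17 bits of every 32-bit element of N0 and N1 are zero, each value fits in
// 15 bits, so in the vXi16 view the elements look like <a0, 0, a1, 0, ...>.
// VPMADDWD then computes a*b + 0*0 per 32-bit lane, which is exactly the
// i32 product.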
38059 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
38060 const X86Subtarget &Subtarget) {
38061 if (!Subtarget.hasSSE2())
38062 return SDValue();
38064 EVT VT = N->getValueType(0);
38066 // Only support vXi64 vectors.
38067 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
38068 VT.getVectorNumElements() < 2 ||
38069 !isPowerOf2_32(VT.getVectorNumElements()))
38070 return SDValue();
38072 SDValue N0 = N->getOperand(0);
38073 SDValue N1 = N->getOperand(1);
38075 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
38076 // 32-bits. We can lower with this if the sign bits stretch that far.
38077 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
38078 DAG.ComputeNumSignBits(N1) > 32) {
38079 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38080 ArrayRef<SDValue> Ops) {
38081 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
38082 };
38083 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
38084 PMULDQBuilder, /*CheckBWI*/false);
38085 }
38087 // If the upper bits are zero we can use a single pmuludq.
38088 APInt Mask = APInt::getHighBitsSet(64, 32);
38089 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
38090 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38091 ArrayRef<SDValue> Ops) {
38092 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
38093 };
38094 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
38095 PMULUDQBuilder, /*CheckBWI*/false);
38096 }
38098 return SDValue();
38099 }
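// e.g. (illustrative) a v2i64 multiply where ComputeNumSignBits(N0/N1) > 32
// (both values fit in a signed 32-bit range) becomes one PMULDQ, and one
// where the upper 32 bits are known zero becomes one PMULUDQ; either avoids
// the multi-instruction expansion of a general 64-bit vector multiply.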
38101 /// Optimize a single multiply with constant into two operations in order to
38102 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
38103 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
38104 TargetLowering::DAGCombinerInfo &DCI,
38105 const X86Subtarget &Subtarget) {
38106 EVT VT = N->getValueType(0);
38108 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
38109 return V;
38111 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
38112 return V;
38114 if (DCI.isBeforeLegalize() && VT.isVector())
38115 return reduceVMULWidth(N, DAG, Subtarget);
38117 if (!MulConstantOptimization)
38118 return SDValue();
38119 // An imul is usually smaller than the alternative sequence.
38120 if (DAG.getMachineFunction().getFunction().hasMinSize())
38121 return SDValue();
38123 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
38124 return SDValue();
38126 if (VT != MVT::i64 && VT != MVT::i32)
38127 return SDValue();
38129 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
38130 if (!C)
38131 return SDValue();
38132 if (isPowerOf2_64(C->getZExtValue()))
38133 return SDValue();
38135 int64_t SignMulAmt = C->getSExtValue();
38136 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
38137 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
38139 SDLoc DL(N);
38140 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
38141 SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
38142 DAG.getConstant(AbsMulAmt, DL, VT));
38143 if (SignMulAmt < 0)
38144 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38145 NewMul);
38147 return NewMul;
38148 }
38150 uint64_t MulAmt1 = 0;
38151 uint64_t MulAmt2 = 0;
38152 if ((AbsMulAmt % 9) == 0) {
38153 MulAmt1 = 9;
38154 MulAmt2 = AbsMulAmt / 9;
38155 } else if ((AbsMulAmt % 5) == 0) {
38156 MulAmt1 = 5;
38157 MulAmt2 = AbsMulAmt / 5;
38158 } else if ((AbsMulAmt % 3) == 0) {
38159 MulAmt1 = 3;
38160 MulAmt2 = AbsMulAmt / 3;
38161 }
38163 SDValue NewMul;
38164 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
38165 if (MulAmt2 &&
38166 (isPowerOf2_64(MulAmt2) ||
38167 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
38169 if (isPowerOf2_64(MulAmt2) &&
38170 !(SignMulAmt >= 0 && N->hasOneUse() &&
38171 N->use_begin()->getOpcode() == ISD::ADD))
38172 // If the second multiplier is pow2, issue it first. We want the multiply by
38173 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
38174 // is an add. Only do this for positive multiply amounts since the
38175 // negate would prevent it from being used as an address mode anyway.
38176 std::swap(MulAmt1, MulAmt2);
38178 if (isPowerOf2_64(MulAmt1))
38179 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38180 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
38181 else
38182 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
38183 DAG.getConstant(MulAmt1, DL, VT));
38185 if (isPowerOf2_64(MulAmt2))
38186 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
38187 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
38188 else
38189 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
38190 DAG.getConstant(MulAmt2, DL, VT));
38192 // Negate the result.
38193 if (SignMulAmt < 0)
38194 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38195 NewMul);
38196 } else if (!Subtarget.slowLEA())
38197 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
38199 if (!NewMul) {
38200 assert(C->getZExtValue() != 0 &&
38201 C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
38202 "Both cases that could cause potential overflows should have "
38203 "already been handled.");
38204 if (isPowerOf2_64(AbsMulAmt - 1)) {
38205 // (mul x, 2^N + 1) => (add (shl x, N), x)
38206 NewMul = DAG.getNode(
38207 ISD::ADD, DL, VT, N->getOperand(0),
38208 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38209 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
38210 MVT::i8)));
38211 // To negate, subtract the number from zero
38212 if (SignMulAmt < 0)
38213 NewMul = DAG.getNode(ISD::SUB, DL, VT,
38214 DAG.getConstant(0, DL, VT), NewMul);
38215 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
38216 // (mul x, 2^N - 1) => (sub (shl x, N), x)
38217 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38218 DAG.getConstant(Log2_64(AbsMulAmt + 1),
38219 DL, MVT::i8));
38220 // To negate, reverse the operands of the subtract.
38221 if (SignMulAmt < 0)
38222 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
38223 else
38224 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
38225 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
38226 // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
38227 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38228 DAG.getConstant(Log2_64(AbsMulAmt - 2),
38229 DL, MVT::i8));
38230 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
38231 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
38232 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
38233 // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
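      // e.g. mul x, 14 becomes ((x << 4) - x) - x.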
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    }
  }

  return NewMul;
}

38245 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
38246 SDValue N0 = N->getOperand(0);
38247 SDValue N1 = N->getOperand(1);
38248 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
38249 EVT VT = N0.getValueType();
38251 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
38252 // since the result of setcc_c is all zero's or all ones.
38253 if (VT.isInteger() && !VT.isVector() &&
38254 N1C && N0.getOpcode() == ISD::AND &&
38255 N0.getOperand(1).getOpcode() == ISD::Constant) {
38256 SDValue N00 = N0.getOperand(0);
38257 APInt Mask = N0.getConstantOperandAPInt(1);
38258 Mask <<= N1C->getAPIntValue();
38259 bool MaskOK = false;
    // We can handle cases concerning bit-widening nodes containing setcc_c if
    // we carefully interrogate the mask to make sure we are semantics
    // preserving.
38263 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
38264 // of the underlying setcc_c operation if the setcc_c was zero extended.
38265 // Consider the following example:
38266 // zext(setcc_c) -> i32 0x0000FFFF
38267 // c1 -> i32 0x0000FFFF
38268 // c2 -> i32 0x00000001
38269 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
38270 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
                N00.getOpcode() == ISD::ANY_EXTEND) &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
    }
    if (MaskOK && Mask != 0) {
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
    }
  }
38287 // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on sandybridge ADD is faster than
  // shl.
38290 // (shl V, 1) -> add V,V
38291 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
38292 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
38293 assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an
      // ADD of two values.
38297 if (N1SplatC->getAPIntValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}
38304 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
38305 SDValue N0 = N->getOperand(0);
38306 SDValue N1 = N->getOperand(1);
38307 EVT VT = N0.getValueType();
38308 unsigned Size = VT.getSizeInBits();
38310 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
38311 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
38312 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
38313 // depending on sign of (SarConst - [56,48,32,24,16])
38315 // sexts in X86 are MOVs. The MOVs have the same code size
  // as above SHIFTs (only SHIFT by 1 has lower code size).
38317 // However the MOVs have 2 advantages to a SHIFT:
38318 // 1. MOVs can write to a register that differs from source
38319 // 2. MOVs accept memory operands
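  // e.g. on i32 (sketch): (sra (shl X, 24), 26) is rewritten below as
  // (sra (sext_inreg X, i8), 2), and (sra (shl X, 24), 22) as
  // (shl (sext_inreg X, i8), 2).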
38321 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
38322 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
38323 N0.getOperand(1).getOpcode() != ISD::Constant)
38326 SDValue N00 = N0.getOperand(0);
38327 SDValue N01 = N0.getOperand(1);
38328 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
38329 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
38330 EVT CVT = N1.getValueType();
  if (SarConst.isNegative())
    return SDValue();
38335 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
38336 unsigned ShiftSize = SVT.getSizeInBits();
38337 // skipping types without corresponding sext/zext and
38338 // ShlConst that is not one of [56,48,32,24,16]
    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
      continue;
    SDLoc DL(N);
    SDValue NN =
        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
    SarConst = SarConst - (Size - ShiftSize);
    if (SarConst == 0)
      return NN;
38347 else if (SarConst.isNegative())
38348 return DAG.getNode(ISD::SHL, DL, VT, NN,
38349 DAG.getConstant(-SarConst, DL, CVT));
    else
      return DAG.getNode(ISD::SRA, DL, VT, NN,
                         DAG.getConstant(SarConst, DL, CVT));
  }
  return SDValue();
}
38357 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
38358 TargetLowering::DAGCombinerInfo &DCI) {
38359 SDValue N0 = N->getOperand(0);
38360 SDValue N1 = N->getOperand(1);
38361 EVT VT = N0.getValueType();
  // Only do this on the last DAG combine as it can interfere with other
  // combines.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();
38368 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
38369 // TODO: This is a generic DAG combine that became an x86-only combine to
38370 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
38371 // and-not ('andn').
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();
38375 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
38376 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!ShiftC || !AndC)
    return SDValue();
38380 // If we can shrink the constant mask below 8-bits or 32-bits, then this
38381 // transform should reduce code size. It may also enable secondary transforms
38382 // from improved known-bits analysis or instruction selection.
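  // e.g. "srl (and X, 0x7f00), 8" becomes "and (srl X, 8), 0x7f", where the
  // new mask fits in a sign-extended 8-bit immediate.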
38383 APInt MaskVal = AndC->getAPIntValue();
38385 // If this can be matched by a zero extend, don't optimize.
38386 if (MaskVal.isMask()) {
38387 unsigned TO = MaskVal.countTrailingOnes();
    if (TO >= 8 && isPowerOf2_32(TO))
      return SDValue();
  }
38392 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
38393 unsigned OldMaskSize = MaskVal.getMinSignedBits();
38394 unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
38395 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
38396 (OldMaskSize > 32 && NewMaskSize <= 32)) {
38397 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
38399 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
38400 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
  }

  return SDValue();
}
38406 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
38407 TargetLowering::DAGCombinerInfo &DCI,
38408 const X86Subtarget &Subtarget) {
38409 unsigned Opcode = N->getOpcode();
  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
         "Unexpected pack opcode");
38413 EVT VT = N->getValueType(0);
38414 SDValue N0 = N->getOperand(0);
38415 SDValue N1 = N->getOperand(1);
38416 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
38417 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
38418 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
38419 N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
38420 "Unexpected PACKSS/PACKUS input type");
38422 bool IsSigned = (X86ISD::PACKSS == Opcode);
38424 // Constant Folding.
38425 APInt UndefElts0, UndefElts1;
38426 SmallVector<APInt, 32> EltBits0, EltBits1;
38427 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
38428 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
38429 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
38430 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
38431 unsigned NumLanes = VT.getSizeInBits() / 128;
38432 unsigned NumDstElts = VT.getVectorNumElements();
38433 unsigned NumSrcElts = NumDstElts / 2;
38434 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
38435 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
38437 APInt Undefs(NumDstElts, 0);
38438 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
38439 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
38440 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
38441 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
38442 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
38443 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
        if (UndefElts[SrcIdx]) {
          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
          continue;
        }
        APInt &Val = EltBits[SrcIdx];
        if (IsSigned) {
          // PACKSS: Truncate signed value with signed saturation.
          // Source values less than dst minint are saturated to minint.
          // Source values greater than dst maxint are saturated to maxint.
          if (Val.isSignedIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getSignedMinValue(DstBitsPerElt);
          else
            Val = APInt::getSignedMaxValue(DstBitsPerElt);
        } else {
          // PACKUS: Truncate signed value with unsigned saturation.
          // Source values less than zero are saturated to zero.
          // Source values greater than dst maxuint are saturated to maxuint.
          if (Val.isIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getNullValue(DstBitsPerElt);
          else
            Val = APInt::getAllOnesValue(DstBitsPerElt);
        }
        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
      }
    }

    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
  }
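  // Worked example of the constant folding above (i16 -> i8): PACKSS
  // saturates 300 to 127 and -200 to -128, while PACKUS saturates 300 to 255
  // and -1 to 0.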
38479 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
38480 // truncate to create a larger truncate.
38481 if (Subtarget.hasAVX512() &&
38482 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
38483 N0.getOperand(0).getValueType() == MVT::v8i32) {
38484 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
38486 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
38487 if (Subtarget.hasVLX())
38488 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
      // Widen input to v16i32 so we can truncate that.
      SDLoc dl(N);
38492 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
38493 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
    }
  }

  // Attempt to combine as shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}
38506 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
38507 TargetLowering::DAGCombinerInfo &DCI,
38508 const X86Subtarget &Subtarget) {
38509 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
38510 X86ISD::VSRL == N->getOpcode()) &&
38511 "Unexpected shift opcode");
38512 EVT VT = N->getValueType(0);
38513 SDValue N0 = N->getOperand(0);
38514 SDValue N1 = N->getOperand(1);
38516 // Shift zero -> zero.
38517 if (ISD::isBuildVectorAllZeros(N0.getNode()))
38518 return DAG.getConstant(0, SDLoc(N), VT);
  // Detect constant shift amounts.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
38523 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
38524 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
38525 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
                                      EltBits[0].getZExtValue(), DAG);
  }
38529 APInt KnownUndef, KnownZero;
38530 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38531 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  return SDValue();
}
38539 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
38540 TargetLowering::DAGCombinerInfo &DCI,
38541 const X86Subtarget &Subtarget) {
38542 unsigned Opcode = N->getOpcode();
38543 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
38544 X86ISD::VSRLI == Opcode) &&
38545 "Unexpected shift opcode");
38546 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
38547 EVT VT = N->getValueType(0);
38548 SDValue N0 = N->getOperand(0);
38549 SDValue N1 = N->getOperand(1);
38550 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
38551 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
38552 "Unexpected value type");
38553 assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
38555 // Out of range logical bit shifts are guaranteed to be zero.
38556 // Out of range arithmetic bit shifts splat the sign bit.
38557 unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue();
  if (ShiftVal >= NumBitsPerElt) {
    if (LogicalShift)
      return DAG.getConstant(0, SDLoc(N), VT);
    else
      ShiftVal = NumBitsPerElt - 1;
  }
  // Shift N0 by zero -> N0.
  if (!ShiftVal)
    return N0;
38569 // Shift zero -> zero.
38570 if (ISD::isBuildVectorAllZeros(N0.getNode()))
38571 return DAG.getConstant(0, SDLoc(N), VT);
38573 // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
38574 // clamped to (NumBitsPerElt - 1).
38575 if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
38576 unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
38577 unsigned NewShiftVal = ShiftVal + ShiftVal2;
38578 if (NewShiftVal >= NumBitsPerElt)
38579 NewShiftVal = NumBitsPerElt - 1;
38580 return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
  }
  // We can decode 'whole byte' logical bit shifts as shuffles.
  if (LogicalShift && (ShiftVal % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }
  // Constant Folding.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
38594 if (N->isOnlyUserOf(N0.getNode()) &&
38595 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
38596 assert(EltBits.size() == VT.getVectorNumElements() &&
38597 "Unexpected shift value type");
    for (APInt &Elt : EltBits) {
      if (X86ISD::VSHLI == Opcode)
        Elt <<= ShiftVal;
      else if (X86ISD::VSRAI == Opcode)
        Elt.ashrInPlace(ShiftVal);
      else
        Elt.lshrInPlace(ShiftVal);
    }
    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
  }
38609 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38610 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
38611 APInt::getAllOnesValue(NumBitsPerElt), DCI))
    return SDValue(N, 0);

  return SDValue();
}
38617 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
38618 TargetLowering::DAGCombinerInfo &DCI,
38619 const X86Subtarget &Subtarget) {
38620 EVT VT = N->getValueType(0);
38621 assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
38622 (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
38623 "Unexpected vector insertion");
38625 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
38626 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38627 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
38628 APInt::getAllOnesValue(NumBitsPerElt), DCI))
38629 return SDValue(N, 0);
  // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}
38639 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
38640 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
38641 /// OR -> CMPNEQSS.
38642 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
38643 TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
38649 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
38650 SDValue N0 = N->getOperand(0);
38651 SDValue N1 = N->getOperand(1);
38652 SDValue CMP0 = N0.getOperand(1);
    SDValue CMP1 = N1.getOperand(1);
    SDLoc DL(N);
38656 // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
      return SDValue();
38660 SDValue CMP00 = CMP0->getOperand(0);
38661 SDValue CMP01 = CMP0->getOperand(1);
38662 EVT VT = CMP00.getValueType();
38664 if (VT == MVT::f32 || VT == MVT::f64) {
38665 bool ExpectingFlags = false;
38666 // Check for any users that want flags:
38667 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
38668 !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
38684 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
38685 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

38693 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
38694 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
38695 // FIXME: need symbolic constants for these magic numbers.
38696 // See X86ATTInstPrinter.cpp:printSSECC().
38697 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget.hasAVX512()) {
            SDValue FSetCC =
                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
38702 // Need to fill with zeros to ensure the bitcast will produce zeroes
38703 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
38704 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
38705 DAG.getConstant(0, DL, MVT::v16i1),
38706 FSetCC, DAG.getIntPtrConstant(0, DL));
38707 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
                                      N->getSimpleValueType(0));
          }
38710 SDValue OnesOrZeroesF =
38711 DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
38712 CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
38714 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
38715 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
38717 if (is64BitFP && !Subtarget.is64Bit()) {
38718 // On a 32-bit target, we cannot bitcast the 64-bit float to a
38719 // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
38721 // bits, but can do this little dance to extract the lowest 32 bits
38722 // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL,
                                           MVT::v2f64, OnesOrZeroesF);
38725 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0, DL));
            IntVT = MVT::i32;
          }
38731 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
38732 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
38733 DAG.getConstant(1, DL, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }

  return SDValue();
}
38744 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
38745 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
38746 assert(N->getOpcode() == ISD::AND);
38748 MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  SDValue X, Y;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (SDValue Not = IsNOT(N0, DAG)) {
    X = Not;
    Y = N1;
  } else if (SDValue Not = IsNOT(N1, DAG)) {
    X = Not;
    Y = N0;
  } else
    return SDValue();
38765 X = DAG.getBitcast(VT, X);
38766 Y = DAG.getBitcast(VT, Y);
  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
}
38770 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
38771 // register. In most cases we actually compare or select YMM-sized registers
38772 // and mixing the two types creates horrible code. This method optimizes
38773 // some of the transition sequences.
38774 // Even with AVX-512 this is still useful for removing casts around logical
38775 // operations on vXi1 mask types.
38776 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
38777 const X86Subtarget &Subtarget) {
38778 EVT VT = N->getValueType(0);
38779 assert(VT.isVector() && "Expected vector type");
38781 assert((N->getOpcode() == ISD::ANY_EXTEND ||
38782 N->getOpcode() == ISD::ZERO_EXTEND ||
38783 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
38785 SDValue Narrow = N->getOperand(0);
38786 EVT NarrowVT = Narrow.getValueType();
  if (Narrow->getOpcode() != ISD::XOR &&
      Narrow->getOpcode() != ISD::AND &&
      Narrow->getOpcode() != ISD::OR)
    return SDValue();
38793 SDValue N0 = Narrow->getOperand(0);
  SDValue N1 = Narrow->getOperand(1);
  SDLoc DL(Narrow);
38797 // The Left side has to be a trunc.
  if (N0.getOpcode() != ISD::TRUNCATE)
    return SDValue();
38801 // The type of the truncated inputs.
  if (N0.getOperand(0).getValueType() != VT)
    return SDValue();
38805 // The right side has to be a 'trunc' or a constant vector.
38806 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
38807 N1.getOperand(0).getValueType() == VT;
  if (!RHSTrunc &&
      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
    return SDValue();
38812 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
    return SDValue();
38817 // Set N0 and N1 to hold the inputs to the new wide operation.
  N0 = N0.getOperand(0);
  if (RHSTrunc)
    N1 = N1.getOperand(0);
  else
    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
38824 // Generate the wide operation.
38825 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  default: llvm_unreachable("Unexpected opcode");
  case ISD::ANY_EXTEND:
    return Op;
38831 case ISD::ZERO_EXTEND:
38832 return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  }
}
38839 /// If both input operands of a logic op are being cast from floating point
38840 /// types, try to convert this into a floating point logic node to avoid
38841 /// unnecessary moves from SSE to integer registers.
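/// e.g. (and (bitcast f32 %a to i32), (bitcast f32 %b to i32)) can instead be
/// lowered as (bitcast (X86ISD::FAND %a, %b) to i32), keeping both values in
/// XMM registers.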
38842 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
38843 const X86Subtarget &Subtarget) {
38844 EVT VT = N->getValueType(0);
38845 SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
    return SDValue();
38852 SDValue N00 = N0.getOperand(0);
38853 SDValue N10 = N1.getOperand(0);
38854 EVT N00Type = N00.getValueType();
38855 EVT N10Type = N10.getValueType();
38857 // Ensure that both types are the same and are legal scalar fp types.
38858 if (N00Type != N10Type ||
38859 !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
        (Subtarget.hasSSE2() && N00Type == MVT::f64)))
    return SDValue();

  unsigned FPOpcode;
38864 switch (N->getOpcode()) {
38865 default: llvm_unreachable("Unexpected input node for FP logic conversion");
38866 case ISD::AND: FPOpcode = X86ISD::FAND; break;
38867 case ISD::OR: FPOpcode = X86ISD::FOR; break;
  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  }
38871 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
38872 return DAG.getBitcast(VT, FPLogic);
/// If this is a zero/all-bits result that is bitwise-anded with a low-bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
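/// e.g. for v4i32: (and (pcmpgt X, Y), (splat 1)) can be rewritten as
/// (vsrli (pcmpgt X, Y), 31), since the compare result is all-ones or zero in
/// each element.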
38878 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
38879 const X86Subtarget &Subtarget) {
38880 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
38881 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
38882 EVT VT0 = Op0.getValueType();
38883 EVT VT1 = Op1.getValueType();
  if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
    return SDValue();

  APInt SplatVal;
38889 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
      !SplatVal.isMask())
    return SDValue();
38893 // Don't prevent creation of ANDN.
  if (isBitwiseNot(Op0))
    return SDValue();
  if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
    return SDValue();
38900 unsigned EltBitWidth = VT0.getScalarSizeInBits();
  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
    return SDValue();

  SDLoc DL(N);
38905 unsigned ShiftVal = SplatVal.countTrailingOnes();
38906 SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
38907 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
  return DAG.getBitcast(N->getValueType(0), Shift);
}
38911 // Get the index node from the lowered DAG of a GEP IR instruction with one
38912 // indexing dimension.
38913 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
  if (Ld->isIndexed())
    return SDValue();

  SDValue Base = Ld->getBasePtr();
  if (Base.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ShiftedIndex = Base.getOperand(0);
  if (ShiftedIndex.getOpcode() != ISD::SHL)
    return SDValue();

  return ShiftedIndex.getOperand(0);
}
38931 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
38932 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
38933 switch (VT.getSizeInBits()) {
38934 default: return false;
    case 64: return Subtarget.is64Bit();
    case 32: return true;
    }
  }
  return false;
}
// This function recognizes cases where the X86 bzhi instruction can replace an
// 'and-load' sequence.
// In case of loading an integer value from an array of constants defined
// as follows:
//
// int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// then applying a bitwise and on the result with another input.
// It's equivalent to performing bzhi (zero high bits) on the input, with the
// same index as the load.
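// e.g. array[idx] == (1 << idx) - 1, so "array[idx] & x" clears all bits of x
// at position idx and above, which is exactly what "bzhi x, idx" computes.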
38952 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
38953 const X86Subtarget &Subtarget) {
  MVT VT = Node->getSimpleValueType(0);
  SDLoc dl(Node);
38957 // Check if subtarget has BZHI instruction for the node's type
  if (!hasBZHI(Subtarget, VT))
    return SDValue();
38961 // Try matching the pattern for both operands.
38962 for (unsigned i = 0; i < 2; i++) {
38963 SDValue N = Node->getOperand(i);
38964 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
    // continue if the operand is not a load instruction
    if (!Ld)
      return SDValue();
    const Value *MemOp = Ld->getMemOperand()->getValue();
    if (!MemOp)
      return SDValue();
38975 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
38976 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
38977 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
38979 Constant *Init = GV->getInitializer();
38980 Type *Ty = Init->getType();
          if (!isa<ConstantDataArray>(Init) ||
              !Ty->getArrayElementType()->isIntegerTy() ||
              Ty->getArrayElementType()->getScalarSizeInBits() !=
                  VT.getSizeInBits() ||
              Ty->getArrayNumElements() >
                  Ty->getArrayElementType()->getScalarSizeInBits())
            continue;
38989 // Check if the array's constant elements are suitable to our case.
38990 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
38991 bool ConstantsMatch = true;
38992 for (uint64_t j = 0; j < ArrayElementCount; j++) {
38993 ConstantInt *Elem =
38994 dyn_cast<ConstantInt>(Init->getAggregateElement(j));
38995 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
              ConstantsMatch = false;
              break;
            }
          }
          if (!ConstantsMatch)
            continue;
39003 // Do the transformation (For 32-bit type):
39004 // -> (and (load arr[idx]), inp)
39005 // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
39006 // that will be replaced with one bzhi instruction.
39007 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
39008 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
39010 // Get the Node which indexes into the array.
          SDValue Index = getIndexFromUnindexedLoad(Ld);
          if (!Index)
            return SDValue();
39014 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
39016 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
39017 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
39019 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
39020 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
        }
      }
    }
  }

  return SDValue();
}
39030 // Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
// Turn it into a series of XORs and a SETNP.
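// e.g. for i32 the idiom below folds x ^= x >> 16 and then XORs the two low
// bytes with an 8-bit XOR, so the hardware parity flag (computed from the low
// 8 bits of the result) reflects the parity of the whole value; SETNP then
// materializes it.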
39032 static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
39033 const X86Subtarget &Subtarget) {
39034 EVT VT = N->getValueType(0);
39036 // We only support 64-bit and 32-bit. 64-bit requires special handling
39037 // unless the 64-bit popcnt instruction is legal.
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();
39041 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
    return SDValue();
39045 SDValue N0 = N->getOperand(0);
39046 SDValue N1 = N->getOperand(1);
39048 // LHS needs to be a single use CTPOP.
  if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
    return SDValue();
39052 // RHS needs to be 1.
  if (!isOneConstant(N1))
    return SDValue();

  SDLoc DL(N);
39057 SDValue X = N0.getOperand(0);
  // If this is 64-bit, it's always best to xor the two 32-bit pieces together
39060 // even if we have popcnt.
39061 if (VT == MVT::i64) {
39062 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
39063 DAG.getNode(ISD::SRL, DL, VT, X,
39064 DAG.getConstant(32, DL, MVT::i8)));
39065 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
39066 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
39067 // Generate a 32-bit parity idiom. This will bring us back here if we need
39068 // to expand it too.
39069 SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
39070 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
39071 DAG.getConstant(1, DL, MVT::i32));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
  }
39074 assert(VT == MVT::i32 && "Unexpected VT!");
39076 // Xor the high and low 16-bits together using a 32-bit operation.
39077 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
39078 DAG.getConstant(16, DL, MVT::i8));
39079 X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
  // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
39082 // This should allow an h-reg to be used to save a shift.
39083 // FIXME: We only get an h-reg in 32-bit mode.
39084 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
39085 DAG.getNode(ISD::SRL, DL, VT, X,
39086 DAG.getConstant(8, DL, MVT::i8)));
39087 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
39088 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
39089 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
39091 // Copy the inverse of the parity flag into a register with setcc.
39092 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
39093 // Zero extend to original type.
  return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
}
39097 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
39098 TargetLowering::DAGCombinerInfo &DCI,
39099 const X86Subtarget &Subtarget) {
39100 EVT VT = N->getValueType(0);
39102 // If this is SSE1 only convert to FAND to avoid scalarization.
39103 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
39104 return DAG.getBitcast(
39105 MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
39106 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
                                 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
  }
39110 // Use a 32-bit and+zext if upper bits known zero.
39111 if (VT == MVT::i64 && Subtarget.is64Bit() &&
39112 !isa<ConstantSDNode>(N->getOperand(1))) {
39113 APInt HiMask = APInt::getHighBitsSet(64, 32);
39114 if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
        DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
      SDLoc dl(N);
39117 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
39118 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
39119 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
                         DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
    }
  }
  // This must be done before legalization has expanded the ctpop.
  if (SDValue V = combineParity(N, DAG, Subtarget))
    return V;
39128 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
39129 // TODO: Support multiple SrcOps.
39130 if (VT == MVT::i1) {
39131 SmallVector<SDValue, 2> SrcOps;
39132 if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
        SrcOps.size() == 1) {
      SDLoc dl(N);
39135 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
39136 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (Mask) {
        APInt AllBits = APInt::getAllOnesValue(NumElts);
        return DAG.getSetCC(dl, MVT::i1, Mask,
                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
      }
    }
  }
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
    return R;

  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
    return ShiftRight;

  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
    return R;
39164 // Attempt to recursively combine a bitmask AND with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }
39171 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
39172 if ((VT.getScalarSizeInBits() % 8) == 0 &&
39173 N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
39174 isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
39175 SDValue BitMask = N->getOperand(1);
39176 SDValue SrcVec = N->getOperand(0).getOperand(0);
39177 EVT SrcVecVT = SrcVec.getValueType();
    // Check that the constant bitmask masks whole bytes.
    APInt UndefElts;
    SmallVector<APInt, 64> EltBits;
39182 if (VT == SrcVecVT.getScalarType() &&
39183 N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
39184 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
        llvm::all_of(EltBits, [](APInt M) {
          return M.isNullValue() || M.isAllOnesValue();
        })) {
39188 unsigned NumElts = SrcVecVT.getVectorNumElements();
39189 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
39190 unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
39192 // Create a root shuffle mask from the byte mask and the extracted index.
39193 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i) {
        if (UndefElts[i])
          continue;
39197 int VecIdx = Scale * Idx + i;
39198 ShuffleMask[VecIdx] =
            EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
      }
39202 if (SDValue Shuffle = combineX86ShufflesRecursively(
39203 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
39204 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
                         N->getOperand(0).getOperand(1));
    }
  }

  return SDValue();
}
39213 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
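// With a shared constant C this is the classic bit-select (X & C) | (Y & ~C);
// expressing the second AND as ANDNP reuses the same constant for both halves,
// and targets with XOP PCMOV or AVX512 VPTERNLOG can match the whole idiom.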
39214 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
39215 const X86Subtarget &Subtarget) {
39216 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
39218 MVT VT = N->getSimpleValueType(0);
  if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
    return SDValue();
39222 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
39223 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
    return SDValue();
39227 // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
39228 // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
39229 bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
39230 Subtarget.hasVLX();
39231 if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
    return SDValue();
39235 // Attempt to extract constant byte masks.
39236 APInt UndefElts0, UndefElts1;
39237 SmallVector<APInt, 32> EltBits0, EltBits1;
  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
                                     false, false))
    return SDValue();
  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
                                     false, false))
    return SDValue();
39245 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
39246 // TODO - add UNDEF elts support.
    if (UndefElts0[i] || UndefElts1[i])
      return SDValue();
    if (EltBits0[i] != ~EltBits1[i])
      return SDValue();
  }

  SDLoc DL(N);
  SDValue X = N->getOperand(0);
  SDValue Y =
      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
                  DAG.getBitcast(VT, N1.getOperand(0)));
  return DAG.getNode(ISD::OR, DL, VT, X, Y);
}
39261 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
39262 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
  if (N->getOpcode() != ISD::OR)
    return false;
39266 SDValue N0 = N->getOperand(0);
39267 SDValue N1 = N->getOperand(1);
  // Canonicalize AND to LHS.
  if (N1.getOpcode() == ISD::AND)
    std::swap(N0, N1);
39273 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
    return false;
39277 Mask = N1.getOperand(0);
39278 X = N1.getOperand(1);
39280 // Check to see if the mask appeared in both the AND and ANDNP.
39281 if (N0.getOperand(0) == Mask)
39282 Y = N0.getOperand(1);
  else if (N0.getOperand(1) == Mask)
    Y = N0.getOperand(0);
  else
    return false;

  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well: waiting for the
  // ANDNP combine lets other combines happen that prevent matching.
  return true;
}

// Try to fold:
39294 // (or (and (M, (sub 0, X)), (pandn M, X)))
39295 // which is a special case of vselect:
39296 // (vselect M, (sub 0, X), X)
39298 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
39299 // We know that, if fNegate is 0 or 1:
39300 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
39302 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
39303 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
39304 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
39305 // This lets us transform our vselect to:
// (add (xor X, M), (and M, 1))
// And further to:
// (sub (xor X, M), M)
39309 static SDValue combineLogicBlendIntoConditionalNegate(
39310 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
39311 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
39312 EVT MaskVT = Mask.getValueType();
39313 assert(MaskVT.isInteger() &&
39314 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
39315 "Mask must be zero/all-bits");
  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
    return SDValue();
  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
    return SDValue();
39322 auto IsNegV = [](SDNode *N, SDValue V) {
39323 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
  };

  SDValue V;
  if (IsNegV(Y.getNode(), X))
    V = X;
  else if (IsNegV(X.getNode(), Y))
    V = Y;
  else
    return SDValue();
39335 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
39336 SDValue SubOp2 = Mask;
39338 // If the negate was on the false side of the select, then
39339 // the operands of the SUB need to be swapped. PR 27251.
39340 // This is because the pattern being matched above is
39341 // (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
39342 // but if the pattern matched was
39343 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
39344 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
39345 // pattern also needs to be a negation of the replacement pattern above.
39346 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
39347 // sub accomplishes the negation of the replacement pattern.
  if (V == Y)
    std::swap(SubOp1, SubOp2);
39351 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
  return DAG.getBitcast(VT, Res);
}

// Try to fold:
39356 // (or (and (m, y), (pandn m, x)))
// into:
// (vselect m, x, y)
39359 // As a special case, try to fold:
39360 // (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
// (sub (xor X, M), M)
39363 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
39364 const X86Subtarget &Subtarget) {
39365 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
39367 EVT VT = N->getValueType(0);
39368 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
        (VT.is256BitVector() && Subtarget.hasInt256())))
    return SDValue();
39372 SDValue X, Y, Mask;
  if (!matchLogicBlend(N, X, Y, Mask))
    return SDValue();
39376 // Validate that X, Y, and Mask are bitcasts, and see through them.
39377 Mask = peekThroughBitcasts(Mask);
39378 X = peekThroughBitcasts(X);
39379 Y = peekThroughBitcasts(Y);
39381 EVT MaskVT = Mask.getValueType();
39382 unsigned EltBits = MaskVT.getScalarSizeInBits();
39384 // TODO: Attempt to handle floating point cases as well?
  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
    return SDValue();

  SDLoc DL(N);
39390 // Attempt to combine to conditional negate: (sub (xor X, M), M)
  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
                                                           DAG, Subtarget))
    return Res;
39395 // PBLENDVB is only available on SSE 4.1.
  if (!Subtarget.hasSSE41())
    return SDValue();
39399 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
39401 X = DAG.getBitcast(BlendVT, X);
39402 Y = DAG.getBitcast(BlendVT, Y);
39403 Mask = DAG.getBitcast(BlendVT, Mask);
39404 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
  return DAG.getBitcast(VT, Mask);
}
// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transforms:
// seteq(cmp x, 0)
// into:
// srl(ctlz x), log2(bitsize(x))
39413 // Input pattern is checked by caller.
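// e.g. for i32: ctlz(x) == 32 iff x == 0, and 32 is the only possible ctlz
// result with bit 5 set, so (ctlz(x) >> 5) is precisely the i1 value "x == 0".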
39414 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
39415 SelectionDAG &DAG) {
39416 SDValue Cmp = Op.getOperand(1);
39417 EVT VT = Cmp.getOperand(0).getValueType();
  unsigned Log2b = Log2_32(VT.getSizeInBits());
  SDLoc dl(Op);
39420 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
39421 // The result of the shift is true or false, and on X86, the 32-bit
39422 // encoding of shr and lzcnt is more desirable.
39423 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
39424 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
39425 DAG.getConstant(Log2b, dl, MVT::i8));
  return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
}
39429 // Try to transform:
39430 // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
// into:
// srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
39433 // Will also attempt to match more generic cases, eg:
39434 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
39435 // Only applies if the target supports the FastLZCNT feature.
39436 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
39437 TargetLowering::DAGCombinerInfo &DCI,
39438 const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
    return SDValue();
39442 auto isORCandidate = [](SDValue N) {
    return (N->getOpcode() == ISD::OR && N->hasOneUse());
  };
39446 // Check the zero extend is extending to 32-bit or more. The code generated by
39447 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
39448 // instructions to clear the upper bits.
39449 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
      !isORCandidate(N->getOperand(0)))
    return SDValue();
39453 // Check the node matches: setcc(eq, cmp 0)
39454 auto isSetCCCandidate = [](SDValue N) {
39455 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
39456 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
39457 N->getOperand(1).getOpcode() == X86ISD::CMP &&
39458 isNullConstant(N->getOperand(1).getOperand(1)) &&
           N->getOperand(1).getValueType().bitsGE(MVT::i32);
  };
39462 SDNode *OR = N->getOperand(0).getNode();
39463 SDValue LHS = OR->getOperand(0);
39464 SDValue RHS = OR->getOperand(1);
39466 // Save nodes matching or(or, setcc(eq, cmp 0)).
39467 SmallVector<SDNode *, 2> ORNodes;
39468 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
39469 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
39470 ORNodes.push_back(OR);
39471 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
39472 LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
  }
39476 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
39477 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
      !isORCandidate(SDValue(OR, 0)))
    return SDValue();
  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
  // to or(srl(ctlz), srl(ctlz)).
39484 // The dag combiner can then fold it into:
39485 // srl(or(ctlz, ctlz)).
39486 EVT VT = OR->getValueType(0);
39487 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
39488 SDValue Ret, NewRHS;
39489 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);

  if (!Ret)
    return SDValue();
39495 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
39496 while (ORNodes.size() > 0) {
39497 OR = ORNodes.pop_back_val();
39498 LHS = OR->getOperand(0);
39499 RHS = OR->getOperand(1);
39500 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
39501 if (RHS->getOpcode() == ISD::OR)
39502 std::swap(LHS, RHS);
    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
    if (!NewRHS)
      return SDValue();
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
  }

  Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
  return Ret;
}
39515 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
39516 TargetLowering::DAGCombinerInfo &DCI,
39517 const X86Subtarget &Subtarget) {
39518 SDValue N0 = N->getOperand(0);
39519 SDValue N1 = N->getOperand(1);
39520 EVT VT = N->getValueType(0);
39522 // If this is SSE1 only convert to FOR to avoid scalarization.
39523 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
39524 return DAG.getBitcast(MVT::v4i32,
39525 DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
39526 DAG.getBitcast(MVT::v4f32, N0),
                                       DAG.getBitcast(MVT::v4f32, N1)));
  }
39530 // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
39531 // TODO: Support multiple SrcOps.
39532 if (VT == MVT::i1) {
39533 SmallVector<SDValue, 2> SrcOps;
39534 if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps) &&
        SrcOps.size() == 1) {
      SDLoc dl(N);
39537 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
39538 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (Mask) {
        APInt AllBits = APInt::getNullValue(NumElts);
        return DAG.getSetCC(dl, MVT::i1, Mask,
                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETNE);
      }
    }
  }
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
    return R;

  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
    return R;
39563 // Attempt to recursively combine an OR of shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();
39573 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
39574 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
39575 unsigned Bits = VT.getScalarSizeInBits();
39577 // SHLD/SHRD instructions have lower register pressure, but on some
39578 // platforms they have higher latency than the equivalent
39579 // series of shifts/or that would otherwise be generated.
39580 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
39581 // have higher latencies and we are not optimizing for size.
  if (!OptForSize && Subtarget.isSHLDSlow())
    return SDValue();
  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();
  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  // Peek through any modulo shift masks.
  SDValue ShMsk0;
  if (ShAmt0.getOpcode() == ISD::AND &&
39602 isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
      ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
    ShMsk0 = ShAmt0;
    ShAmt0 = ShAmt0.getOperand(0);
  }
  SDValue ShMsk1;
39608 if (ShAmt1.getOpcode() == ISD::AND &&
39609 isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
      ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
    ShMsk1 = ShAmt1;
    ShAmt1 = ShAmt1.getOperand(0);
  }
39615 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
39616 ShAmt0 = ShAmt0.getOperand(0);
39617 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  SDLoc DL(N);
39621 unsigned Opc = ISD::FSHL;
39622 SDValue Op0 = N0.getOperand(0);
39623 SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
    Opc = ISD::FSHR;
39626 std::swap(Op0, Op1);
39627 std::swap(ShAmt0, ShAmt1);
    std::swap(ShMsk0, ShMsk1);
  }
  auto GetFunnelShift = [&DAG, &DL, VT, Opc](SDValue Op0, SDValue Op1,
                                             SDValue Amt) {
39633 if (Opc == ISD::FSHR)
39634 std::swap(Op0, Op1);
39635 return DAG.getNode(Opc, DL, VT, Op0, Op1,
                       DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Amt));
  };
39639 // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
39640 // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
39641 // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
39642 // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
39643 // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
39644 // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
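  // e.g. with Bits == 32 and C == 5: (or (shl X, 5), (srl Y, 27)) becomes
  // FSHL(X, Y, 5), which can be selected as a single SHLD instruction.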
39645 if (ShAmt1.getOpcode() == ISD::SUB) {
39646 SDValue Sum = ShAmt1.getOperand(0);
39647 if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
39648 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
39649 if (ShAmt1Op1.getOpcode() == ISD::AND &&
39650 isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
39651 ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
39652 ShMsk1 = ShAmt1Op1;
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      }
39655 if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
39656 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
39657 if ((SumC->getAPIntValue() == Bits ||
39658 (SumC->getAPIntValue() == 0 && ShMsk1)) &&
39659 ShAmt1Op1 == ShAmt0)
        return GetFunnelShift(Op0, Op1, ShAmt0);
    }
39662 } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
39663 auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
39664 if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
39665 return GetFunnelShift(Op0, Op1, ShAmt0);
39666 } else if (ShAmt1.getOpcode() == ISD::XOR) {
39667 SDValue Mask = ShAmt1.getOperand(1);
39668 if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
39669 unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
39670 SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
39671 if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
39672 ShAmt1Op0 = ShAmt1Op0.getOperand(0);
39673 if (MaskC->getSExtValue() == (Bits - 1) &&
39674 (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
39675 if (Op1.getOpcode() == InnerShift &&
39676 isa<ConstantSDNode>(Op1.getOperand(1)) &&
39677 Op1.getConstantOperandAPInt(1) == 1) {
          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
        }
39680 // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
39681 if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
39682 Op1.getOperand(0) == Op1.getOperand(1)) {
          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
        }
      }
    }
  }

  return SDValue();
}
39692 /// Try to turn tests against the signbit in the form of:
/// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
/// SETGT(X, -1)
39696 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
39697 // This is only worth doing if the output type is i8 or i1.
39698 EVT ResultType = N->getValueType(0);
39699 if (ResultType != MVT::i8 && ResultType != MVT::i1)
39702 SDValue N0 = N->getOperand(0);
39703 SDValue N1 = N->getOperand(1);
39705 // We should be performing an xor against a truncated shift.
  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
    return SDValue();
39709 // Make sure we are performing an xor against one.
  if (!isOneConstant(N1))
    return SDValue();
39713 // SetCC on x86 zero extends so only act on this if it's a logical shift.
39714 SDValue Shift = N0.getOperand(0);
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
    return SDValue();
39718 // Make sure we are truncating from one of i16, i32 or i64.
39719 EVT ShiftTy = Shift.getValueType();
  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
    return SDValue();
39723 // Make sure the shift amount extracts the sign bit.
  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
    return SDValue();
39728 // Create a greater-than comparison against -1.
  // N.B. Using SETGE against 0 works but we want a canonical looking
  // comparison, using SETGT matches up with what TranslateX86CC does.
  SDLoc DL(N);
39732 SDValue ShiftOp = Shift.getOperand(0);
39733 EVT ShiftOpTy = ShiftOp.getValueType();
39734 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39735 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
39736 *DAG.getContext(), ResultType);
39737 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
39738 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
39739 if (SetCCResultType != ResultType)
    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
  return Cond;
}
39744 /// Turn vector tests of the signbit in the form of:
/// xor (sra X, elt_size(X)-1), -1
/// into:
/// pcmpgt X, -1
///
39749 /// This should be called before type legalization because the pattern may not
39750 /// persist after that.
39751 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
39752 const X86Subtarget &Subtarget) {
39753 EVT VT = N->getValueType(0);
  if (!VT.isSimple())
    return SDValue();
39757 switch (VT.getSimpleVT().SimpleTy) {
  default: return SDValue();
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
  case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
  }
39769 // There must be a shift right algebraic before the xor, and the xor must be a
39770 // 'not' operation.
39771 SDValue Shift = N->getOperand(0);
39772 SDValue Ones = N->getOperand(1);
39773 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
      !ISD::isBuildVectorAllOnes(Ones.getNode()))
    return SDValue();
39777 // The shift should be smearing the sign bit across each vector element.
  ConstantSDNode *ShiftAmt =
      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
  if (!ShiftAmt ||
      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
    return SDValue();
39784 // Create a greater-than comparison against -1. We don't use the more obvious
39785 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
  return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
}
39789 /// Detect patterns of truncation with unsigned saturation:
39791 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
/// Return the source value x to be truncated or SDValue() if the pattern was
/// not matched.
///
39795 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
39796 /// where C1 >= 0 and C2 is unsigned max of destination type.
39798 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
39799 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
39801 /// These two patterns are equivalent to:
39802 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
39803 /// So return the smax(x, C1) value to be truncated or SDValue() if the
39804 /// pattern was not matched.
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                 const SDLoc &DL) {
39807 EVT InVT = In.getValueType();
39809 // Saturation with truncation. We truncate from InVT to VT.
39810 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
39811 "Unexpected types for truncate operation");
39813 // Match min/max and return limit value as a parameter.
39814 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
39815 if (V.getOpcode() == Opcode &&
39816 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
      return V.getOperand(0);
    return SDValue();
  };

  APInt C1, C2;
39822 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
    // the element size of the destination type.
    if (C2.isMask(VT.getScalarSizeInBits()))
      return UMin;
39828 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
39829 if (MatchMinMax(SMin, ISD::SMAX, C1))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
        return SMin;
39833 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
39834 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
          C2.uge(C1))
        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));

  return SDValue();
}
39843 /// Detect patterns of truncation with signed saturation:
39844 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
39845 /// signed_max_of_dest_type)) to dest_type)
39847 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
39848 /// signed_min_of_dest_type)) to dest_type).
39849 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
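/// e.g. for vXi32 -> vXi16, (truncate (smin (smax X, -32768), 32767)) matches
/// and X is returned, letting the caller emit a single saturating pack
/// (PACKSSDW-style) instead of clamp + truncate.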
39852 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
39853 unsigned NumDstBits = VT.getScalarSizeInBits();
39854 unsigned NumSrcBits = In.getScalarValueSizeInBits();
39855 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
39857 auto MatchMinMax = [](SDValue V, unsigned Opcode,
                        const APInt &Limit) -> SDValue {
    APInt C;
39860 if (V.getOpcode() == Opcode &&
39861 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
      return V.getOperand(0);
    return SDValue();
  };
  APInt SignedMax, SignedMin;
  if (MatchPackUS) {
    SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
    SignedMin = APInt(NumSrcBits, 0);
  } else {
    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
  }
39875 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
39876 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
39879 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
39880 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
39886 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
39888 const X86Subtarget &Subtarget) {
39889 if (!Subtarget.hasSSE2() || !VT.isVector())
39892 EVT SVT = VT.getVectorElementType();
39893 EVT InVT = In.getValueType();
39894 EVT InSVT = InVT.getVectorElementType();
39896 // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
39897 // split across two registers. We can use a packusdw+perm to clamp to 0-65535
39898 // and concatenate at the same time. Then we can use a final vpmovuswb to
39900 if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
39901 InVT == MVT::v16i32 && VT == MVT::v16i8) {
39902 if (auto USatVal = detectSSatPattern(In, VT, true)) {
39903 // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
39904 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
39905 DL, DAG, Subtarget);
39906 assert(Mid && "Failed to pack!");
39907 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
39911 // vXi32 truncate instructions are available with AVX512F.
39912 // vXi16 truncate instructions are only available with AVX512BW.
39913 // For 256-bit or smaller vectors, we require VLX.
39914 // FIXME: We could widen truncates to 512 to remove the VLX restriction.
39915 // If the result type is 256-bits or larger and we have disable 512-bit
39916 // registers, we should go ahead and use the pack instructions if possible.
39917 bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
39918 (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
39919 (InVT.getSizeInBits() > 128) &&
39920 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
39921 !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
39923 if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
39924 VT.getSizeInBits() >= 64 &&
39925 (SVT == MVT::i8 || SVT == MVT::i16) &&
39926 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
39927 if (auto USatVal = detectSSatPattern(In, VT, true)) {
39928 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
39929 // Only do this when the result is at least 64 bits or we'll leaving
39930 // dangling PACKSSDW nodes.
39931 if (SVT == MVT::i8 && InSVT == MVT::i32) {
39932 EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
39933 VT.getVectorNumElements());
39934 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
39936 assert(Mid && "Failed to pack!");
39937 SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
39939 assert(V && "Failed to pack!");
39941 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
39942 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
39945 if (auto SSatVal = detectSSatPattern(In, VT))
39946 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
39950 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39951 if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
39952 Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
39955 if (auto SSatVal = detectSSatPattern(In, VT)) {
39957 TruncOpc = X86ISD::VTRUNCS;
39958 } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
39960 TruncOpc = X86ISD::VTRUNCUS;
39963 unsigned ResElts = VT.getVectorNumElements();
39964 // If the input type is less than 512 bits and we don't have VLX, we need
39965 // to widen to 512 bits.
39966 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
39967 unsigned NumConcats = 512 / InVT.getSizeInBits();
39968 ResElts *= NumConcats;
39969 SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
39970 ConcatOps[0] = SatVal;
39971 InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
39972 NumConcats * InVT.getVectorNumElements());
39973 SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
39975 // Widen the result if its narrower than 128 bits.
39976 if (ResElts * SVT.getSizeInBits() < 128)
39977 ResElts = 128 / SVT.getSizeInBits();
39978 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
39979 SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
39980 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
39981 DAG.getIntPtrConstant(0, DL));
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the
/// efficient X86ISD::AVG instruction.
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget,
                                const SDLoc &DL) {
  if (!VT.isVector())
    return SDValue();
  EVT InVT = In.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  EVT ScalarVT = VT.getVectorElementType();
  if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
        NumElems >= 2 && isPowerOf2_32(NumElems)))
    return SDValue();

  // InScalarVT is the intermediate type in AVG pattern and it should be greater
  // than the original input type (i8/i16).
  EVT InScalarVT = InVT.getVectorElementType();
  if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Detect the following pattern:
  //
  //   %1 = zext <N x i8> %a to <N x i32>
  //   %2 = zext <N x i8> %b to <N x i32>
  //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
  //   %4 = add nuw nsw <N x i32> %3, %2
  //   %5 = lshr <N x i32> %4, <i32 1 x N>
  //   %6 = trunc <N x i32> %5 to <N x i8>
  //
  // In AVX512, the last instruction can also be a trunc store.
  if (In.getOpcode() != ISD::SRL)
    return SDValue();

  // A lambda checking whether the given SDValue is a constant vector with
  // each element in the range [Min, Max].
  auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
    if (!BV || !BV->isConstant())
      return false;
    for (SDValue Op : V->ops()) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
      if (!C)
        return false;
      const APInt &Val = C->getAPIntValue();
      if (Val.ult(Min) || Val.ugt(Max))
        return false;
    }
    return true;
  };

  // Check if each element of the vector is right-shifted by one.
  auto LHS = In.getOperand(0);
  auto RHS = In.getOperand(1);
  if (!IsConstVectorInRange(RHS, 1, 1))
    return SDValue();
  if (LHS.getOpcode() != ISD::ADD)
    return SDValue();

  // Detect a pattern of a + b + 1 where the order doesn't matter.
  SDValue Operands[3];
  Operands[0] = LHS.getOperand(0);
  Operands[1] = LHS.getOperand(1);

  auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                       ArrayRef<SDValue> Ops) {
    return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
  };

  // Take care of the case when one of the operands is a constant vector whose
  // element is in the range [1, 256].
  if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
      Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
      Operands[0].getOperand(0).getValueType() == VT) {
    // The pattern is detected. Subtract one from the constant vector, then
    // demote it and emit X86ISD::AVG instruction.
    SDValue VecOnes = DAG.getConstant(1, DL, InVT);
    Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
    Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
    return SplitOpsAndApply(DAG, Subtarget, DL, VT,
                            { Operands[0].getOperand(0), Operands[1] },
                            AVGBuilder);
  }

  // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
  // Match the or case only if it's 'add-like' - can be replaced by an add.
  auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
    if (ISD::ADD == V.getOpcode()) {
      Op0 = V.getOperand(0);
      Op1 = V.getOperand(1);
      return true;
    }
    if (ISD::ZERO_EXTEND != V.getOpcode())
      return false;
    V = V.getOperand(0);
    if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
        !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
      return false;
    Op0 = V.getOperand(0);
    Op1 = V.getOperand(1);
    return true;
  };

  SDValue Op0, Op1;
  if (FindAddLike(Operands[0], Op0, Op1))
    std::swap(Operands[0], Operands[1]);
  else if (!FindAddLike(Operands[1], Op0, Op1))
    return SDValue();
  Operands[2] = Op0;
  Operands[1] = Op1;

  // Now we have three operands of two additions. Check that one of them is a
  // constant vector with ones, and the other two can be promoted from i8/i16.
  for (int i = 0; i < 3; ++i) {
    if (!IsConstVectorInRange(Operands[i], 1, 1))
      continue;
    std::swap(Operands[i], Operands[2]);

    // Check if Operands[0] and Operands[1] are results of type promotion.
    for (int j = 0; j < 2; ++j)
      if (Operands[j].getValueType() != VT) {
        if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
            Operands[j].getOperand(0).getValueType() != VT)
          return SDValue();
        Operands[j] = Operands[j].getOperand(0);
      }

    // The pattern is detected, emit X86ISD::AVG instruction(s).
    return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
                            AVGBuilder);
  }

  return SDValue();
}

static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations. Also split non-temporal aligned loads on
  // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  bool Fast;
  unsigned Alignment = Ld->getAlignment();
  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
      Ext == ISD::NON_EXTLOAD &&
      ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
                               *Ld->getMemOperand(), &Fast) &&
        !Fast))) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    unsigned HalfAlign = 16;
    SDValue Ptr1 = Ld->getBasePtr();
    SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems / 2);
    SDValue Load1 =
        DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
                    Alignment, Ld->getMemOperand()->getFlags());
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
                                Ld->getPointerInfo().getWithOffset(HalfAlign),
                                MinAlign(Alignment, HalfAlign),
                                Ld->getMemOperand()->getFlags());
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1), Load2.getValue(1));

    SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  // Bool vector load - attempt to cast to an integer, as we have good
  // (vXiY *ext(vXi1 bitcast(iX))) handling.
  if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
      RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
    unsigned NumElts = RegVT.getVectorNumElements();
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
    if (TLI.isTypeLegal(IntVT)) {
      SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
                                    Ld->getPointerInfo(), Alignment,
                                    Ld->getMemOperand()->getFlags());
      SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
      return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
    }
  }

  return SDValue();
}

/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
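///
/// Illustrative example (not from the original source): for the mask
///   (build_vector (i1 0), (i1 0), (i1 1), (i1 0))
/// this returns 2; an all-zeros, all-ones, or partially-undef mask
/// returns -1.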
static int getOneTrueElt(SDValue V) {
  // This needs to be a build vector of booleans.
  // TODO: Checking for the i1 type matches the IR definition for the mask,
  // but the mask check could be loosened to i8 or other types. That might
  // also require checking more than 'allOnesValue'; eg, the x86 HW
  // instructions only require that the MSB is set for each mask element.
  // The ISD::MSTORE comments/definition do not specify how the mask operand
  // is formatted.
  auto *BV = dyn_cast<BuildVectorSDNode>(V);
  if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
    return -1;

  int TrueIndex = -1;
  unsigned NumElts = BV->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    const SDValue &Op = BV->getOperand(i);
    if (Op.isUndef())
      return -1;
    auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
    if (!ConstNode)
      return -1;
    if (ConstNode->getAPIntValue().isAllOnesValue()) {
      // If we already found a one, this is too many.
      if (TrueIndex >= 0)
        return -1;
      TrueIndex = i;
    }
  }
  return TrueIndex;
}

/// Given a masked memory load/store operation, return true if it has one mask
/// bit set. If it has one mask bit set, then also return the memory address of
/// the scalar element to load/store, the vector index to insert/extract that
/// scalar element, and the alignment for the scalar memory access.
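///
/// Illustrative example (not from the original source): for a masked v4f32
/// operation with mask (build_vector 0, 0, 1, 0), this returns true with
/// Addr = BasePtr + 8, Index = 2, and a correspondingly reduced Alignment.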
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
                                         SelectionDAG &DAG, SDValue &Addr,
                                         SDValue &Index, unsigned &Alignment) {
  int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
  if (TrueMaskElt < 0)
    return false;

  // Get the address of the one scalar element that is specified by the mask
  // using the appropriate offset from the base pointer.
  EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
  Addr = MaskedOp->getBasePtr();
  if (TrueMaskElt != 0) {
    unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
    Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
  }

  Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
  Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
  return true;
}

/// If exactly one element of the mask is set for a non-extending masked load,
/// it can be reduced to a scalar load and a vector insert.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
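///
/// Illustrative example (not from the original source), for a v4i32 masked
/// load with mask <0,0,1,0>:
///   (masked_load Ptr, PassThru)
///     -> (insert_vector_elt PassThru, (load Ptr+8), 2)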
static SDValue
reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Load the one scalar element that is specified by the mask using the
  // appropriate offset from the base pointer.
  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  SDValue Load =
      DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
                  Alignment, ML->getMemOperand()->getFlags());

  // Insert the loaded element into the appropriate place in the vector.
  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
                               ML->getPassThru(), Load, VecIndex);
  return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
}

static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI) {
  if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
    return SDValue();

  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);

  // If we are loading the first and last elements of a vector, it is safe and
  // always faster to load the whole vector. Replace the masked load with a
  // vector load and select.
  unsigned NumElts = VT.getVectorNumElements();
  BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
  bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
  bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
  if (LoadFirstElt && LoadLastElt) {
    SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
                                ML->getMemOperand());
    SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
                                  ML->getPassThru());
    return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
  }

  // Convert a masked load with a constant mask into a masked load and a select.
  // This allows the select operation to use a faster kind of select instruction
  // (for example, vblendvps -> vblendps).

  // Don't try this if the pass-through operand is already undefined. That would
  // cause an infinite loop because that's what we're about to create.
  if (ML->getPassThru().isUndef())
    return SDValue();

  if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
    return SDValue();

  // The new masked load has an undef pass-through operand. The select uses the
  // original pass-through operand.
  SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
                                    ML->getMask(), DAG.getUNDEF(VT),
                                    ML->getMemoryVT(), ML->getMemOperand(),
                                    ML->getExtensionType());
  SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
                                ML->getPassThru());

  return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
}

static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);

  // TODO: Expanding load with constant mask may be optimized as well.
  if (Mld->isExpandingLoad())
    return SDValue();

  if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
    if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
      return ScalarLoad;
    // TODO: Do some AVX512 subsets benefit from this transform?
    if (!Subtarget.hasAVX512())
      if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
        return Blend;
  }

  return SDValue();
}

/// If exactly one element of the mask is set for a non-truncating masked
/// store, it can be reduced to a vector extract and a scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
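///
/// Illustrative example (not from the original source), for a v4i32 masked
/// store with mask <0,0,1,0>:
///   (masked_store V, Ptr) -> (store (extract_vector_elt V, 2), Ptr+8)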
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
                                              SelectionDAG &DAG) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Extract the one scalar element that is actually being stored.
  SDLoc DL(MS);
  EVT VT = MS->getValue().getValueType();
  EVT EltVT = VT.getVectorElementType();
  SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                MS->getValue(), VecIndex);

  // Store that element at the appropriate offset from the base pointer.
  return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
                      Alignment, MS->getMemOperand()->getFlags());
}

static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (Mst->isCompressingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  SDLoc dl(Mst);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Mst->isTruncatingStore())
    return SDValue();

  if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
    return ScalarStore;

  // If the mask value has been legalized to a non-boolean vector, try to
  // simplify ops leading up to it. We only demand the MSB of each lane.
  SDValue Mask = Mst->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  SDValue Value = Mst->getValue();
  if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            Mst->getMemoryVT())) {
    return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
                              Mst->getBasePtr(), Mask,
                              Mst->getMemoryVT(), Mst->getMemOperand(), true);
  }

  return SDValue();
}

static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  unsigned Alignment = St->getAlignment();
  SDValue StoredVal = St->getValue();
  EVT VT = StoredVal.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Convert a store of vXi1 into a store of iX and a bitcast.
  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1) {

    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    StoredVal = DAG.getBitcast(NewVT, StoredVal);

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
  // This will avoid a copy to k-register.
  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      StoredVal.getOperand(0).getValueType() == MVT::i8) {
    return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
                        St->getBasePtr(), St->getPointerInfo(),
                        St->getAlignment(), St->getMemOperand()->getFlags());
  }

  // Widen v2i1/v4i1 stores to v8i1.
  if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
      Subtarget.hasAVX512()) {
    unsigned NumConcats = 8 / VT.getVectorNumElements();
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
    Ops[0] = StoredVal;
    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // Turn vXi1 stores of constants into a scalar store.
  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
    // If it's a v64i1 store without 64-bit support, we need two stores.
    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(0, 32));
      Lo = combinevXi1ConstantToInteger(Lo, DAG);
      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(32, 32));
      Hi = combinevXi1ConstantToInteger(Hi, DAG);

      SDValue Ptr0 = St->getBasePtr();
      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);

      SDValue Ch0 =
          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
                       Alignment, St->getMemOperand()->getFlags());
      SDValue Ch1 =
          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
                       St->getPointerInfo().getWithOffset(4),
                       MinAlign(Alignment, 4U),
                       St->getMemOperand()->getFlags());
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
    }

    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
  // Sandy Bridge, perform two 16-byte stores.
  bool Fast;
  if (VT.is256BitVector() && StVT == VT &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             *St->getMemOperand(), &Fast) &&
      !Fast) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    return splitVectorStore(St, DAG);
  }

  // Split under-aligned vector non-temporal stores.
  if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
    // ZMM/YMM nt-stores - either it can be stored as a series of shorter
    // vectors or the legalizer can scalarize it to use MOVNTI.
    if (VT.is256BitVector() || VT.is512BitVector()) {
      unsigned NumElems = VT.getVectorNumElements();
      if (NumElems < 2)
        return SDValue();
      return splitVectorStore(St, DAG);
    }

    // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
    // stores.
    if (VT.is128BitVector() && Subtarget.hasSSE2()) {
      MVT NTVT = Subtarget.hasSSE4A()
                     ? MVT::v2f64
                     : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
      return scalarizeVectorStore(St, NTVT, DAG);
    }
  }

  // Try to optimize v16i16->v16i8 truncating stores when BWI is not
  // supported, but AVX512F is, by extending to v16i32 and truncating.
  if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
      St->getValue().getOpcode() == ISD::TRUNCATE &&
      St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
      TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
      St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
    return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
                             MVT::v16i8, St->getMemOperand());
  }

  // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
  if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
      (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
       StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
      TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
    bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
    return EmitTruncSStore(IsSigned, St->getChain(),
                           dl, StoredVal.getOperand(0), St->getBasePtr(),
                           VT, St->getMemOperand(), DAG);
  }

  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
  if (St->isTruncatingStore() && VT.isVector()) {
    // Check if we can detect an AVG pattern from the truncation. If yes,
    // replace the trunc store by a normal store with the result of X86ISD::AVG
    // instruction.
    if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
      if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
                                         Subtarget, dl))
        return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                            St->getPointerInfo(), St->getAlignment(),
                            St->getMemOperand()->getFlags());

    if (TLI.isTruncStoreLegal(VT, StVT)) {
      if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
        return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
      if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
                                          DAG, dl))
        return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
    }

    return SDValue();
  }

  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.
  //
  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function &F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool F64IsLegal =
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
  if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
      isa<LoadSDNode>(St->getValue()) &&
      cast<LoadSDNode>(St->getValue())->isSimple() &&
      St->getChain().hasOneUse() && St->isSimple()) {
    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
    SmallVector<SDValue, 8> Ops;

    if (!ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget.is64Bit() || F64IsLegal) {
      MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getMemOperand());

      // Make sure new load is placed in same chain order.
      DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
      return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
                          St->getMemOperand());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(), Ld->getAlignment(),
                               Ld->getMemOperand()->getFlags());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               MinAlign(Ld->getAlignment(), 4),
                               Ld->getMemOperand()->getFlags());
    // Make sure new loads are placed in same chain order.
    DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
    DAG.makeEquivalentMemoryOrdering(Ld, HiLd);

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);

    SDValue LoSt =
        DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
                     St->getAlignment(), St->getMemOperand()->getFlags());
    SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                MinAlign(St->getAlignment(), 4),
                                St->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }

  // This is similar to the above case, but here we handle a scalar 64-bit
  // integer store that is extracted from a vector on a 32-bit target.
  // If we have SSE2, then we can treat it like a floating-point double
  // to get past legalization. The execution dependencies fixup pass will
  // choose the optimal machine instruction for the store if this really is
  // an integer or v2f32 rather than an f64.
  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue OldExtract = St->getOperand(1);
    SDValue ExtOp0 = OldExtract.getOperand(0);
    unsigned VecSize = ExtOp0.getValueSizeInBits();
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                     BitCast, OldExtract.getOperand(1));
    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  return SDValue();
}
/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              bool IsCommutative) {
  // If either operand is undef, bail out. The binop should be simplified.
  if (LHS.isUndef() || RHS.isUndef())
    return false;

  // Look for the following pattern:
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  MVT VT = LHS.getSimpleValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");
  unsigned NumElts = VT.getVectorNumElements();

  // TODO - can we make a general helper method that does all of this for us?
  auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
                        SmallVectorImpl<int> &ShuffleMask) {
    if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
      if (!Op.getOperand(0).isUndef())
        N0 = Op.getOperand(0);
      if (!Op.getOperand(1).isUndef())
        N1 = Op.getOperand(1);
      ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
      ShuffleMask.append(Mask.begin(), Mask.end());
      return;
    }
    bool UseSubVector = false;
    if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Op.getOperand(0).getValueType().is256BitVector() &&
        llvm::isNullConstant(Op.getOperand(1))) {
      Op = Op.getOperand(0);
      UseSubVector = true;
    }
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<int, 16> SrcShuffleMask;
    SDValue BC = peekThroughBitcasts(Op);
    bool IsUnary;
    if (isTargetShuffle(BC.getOpcode()) &&
        getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
                             SrcOps, SrcShuffleMask, IsUnary)) {
      if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
          SrcOps.size() <= 2) {
        N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
        N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
        ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
      }
      if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
          SrcOps.size() == 1) {
        N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
        N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
        ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
        ShuffleMask.append(Mask.begin(), Mask.end());
      }
    }
  };

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle, then pretend it is the identity shuffle:
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask;
  GetShuffle(LHS, A, B, LMask);

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask;
  GetShuffle(RHS, C, D, RMask);

  // At least one of the operands should be a vector shuffle.
  unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
  if (NumShuffles == 0)
    return false;

  if (LMask.empty()) {
    A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask.push_back(i);
  }

  if (RMask.empty()) {
    C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask.push_back(i);
  }

  // If A and B occur in reverse order in RHS, then canonicalize by commuting
  // RHS operands and shuffle mask.
  if (A != C) {
    std::swap(C, D);
    ShuffleVectorSDNode::commuteMask(RMask);
  }
  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D))
    return false;

  // LHS and RHS are now:
  //   LHS = shuffle A, B, LMask
  //   RHS = shuffle A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
  // so we just repeat the inner loop if this is a 256-bit op.
  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
  assert((NumEltsPer128BitChunk % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
      // Ignore undefined components.
      int LIdx = LMask[i + j], RIdx = RMask[i + j];
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // The low half of the 128-bit result must choose from A.
      // The high half of the 128-bit result must choose from B,
      // unless B is undef. In that case, we are always choosing from A.
      unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
      unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;

      // Check that successive elements are being operated on. If not, this is
      // not a horizontal operation.
      int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
      if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.

  if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
    return false;

  LHS = DAG.getBitcast(VT, LHS);
  RHS = DAG.getBitcast(VT, RHS);
  return true;
}

/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  bool IsFadd = N->getOpcode() == ISD::FADD;
  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
  assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");

  // Try to synthesize horizontal add/sub from adds/subs of shuffles.
  if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
    return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);

  return SDValue();
}

/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
/// anything that is guaranteed to be transformed by DAGCombiner.
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget,
                                          const SDLoc &DL) {
  assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
  SDValue Src = N->getOperand(0);
  unsigned SrcOpcode = Src.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  auto IsFreeTruncation = [VT](SDValue Op) {
    unsigned TruncSizeInBits = VT.getScalarSizeInBits();

    // See if this has been extended from a smaller/equal size to
    // the truncation size, allowing a truncation to combine with the extend.
    unsigned Opcode = Op.getOpcode();
    if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
         Opcode == ISD::ZERO_EXTEND) &&
        Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
      return true;

    // See if this is a single use constant which can be constant folded.
    // NOTE: We don't peek through bitcasts here because there is currently
    // no support for constant folding truncate+bitcast+vector_of_constants. So
    // we'll just end up with a truncate on both operands which will
    // get turned back into (truncate (binop)) causing an infinite loop.
    return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
  };

  auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
    SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
    SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
  };

  // Don't combine if the operation has other uses.
  if (!Src.hasOneUse())
    return SDValue();

  // Only support vector truncation for now.
  // TODO: i64 scalar math would benefit as well.
  if (!VT.isVector())
    return SDValue();

  // In most cases it's only worth pre-truncating if we're only facing the cost
  // of one truncation.
  // i.e. if one of the inputs will constant fold or the input is repeated.
  switch (SrcOpcode) {
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }

  case ISD::MUL:
    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) -
    // it's better to truncate if we have the chance.
    if (SrcVT.getScalarType() == MVT::i64 &&
        TLI.isOperationLegal(SrcOpcode, VT) &&
        !TLI.isOperationLegal(SrcOpcode, SrcVT))
      return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
    LLVM_FALLTHROUGH;
  case ISD::ADD: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(SrcOpcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }

  case ISD::SUB: {
    // TODO: ISD::SUB We are conservative and require both sides to be freely
    // truncatable to avoid interfering with combineSubToSubus.
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(SrcOpcode, VT) &&
        (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
      return TruncateArithmetic(Op0, Op1);
    break;
  }
  }

  return SDValue();
}

/// Truncate using ISD::AND mask and X86ISD::PACKUS.
/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// MaskX = X & 0xffff (clear high bits to prevent saturation)
/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);

  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
                                    OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
}

/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
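/// Illustrative example (not from the original source):
///   trunc <8 x i32> X to <8 x i16> -->
///   SignX = sign_extend_inreg X, v8i16 (replicate sign bits so PACKSS
///           does not saturate)
///   packss (extract_subv SignX, 0), (extract_subv SignX, 1)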
static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);
  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
                   DAG.getValueType(OutVT));
  return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
}

/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR with each
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on them.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  EVT InVT = In.getValueType();
  unsigned NumElems = OutVT.getVectorNumElements();

  // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
  // SSE2, and we need to take care of it specially.
  // AVX512 provides vpmovdb.
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
        NumElems >= 8))
    return SDValue();

  // SSSE3's pshufb results in fewer instructions in the cases below.
  if (Subtarget.hasSSSE3() && NumElems == 8 &&
      ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
       (InSVT == MVT::i32 && OutSVT == MVT::i16)))
    return SDValue();

  SDLoc DL(N);
  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
  // truncate 2 x v4i32 to v8i16.
  if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
    return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
  if (InSVT == MVT::i32)
    return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);

  return SDValue();
}

/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into
/// X86ISD::PACKSS/PACKUS operations.
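///
/// Illustrative example (not from the original source): a v8i32 SETCC result
/// is all-ones or all-zeros in each lane, so every bit is a copy of the sign
/// bit and (trunc X to v8i16) can be lowered as
/// (packss (extract_subv X, 0), (extract_subv X, 1)) without losing any bits.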
static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
                                               SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  // Requires SSE2.
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  MVT VT = N->getValueType(0).getSimpleVT();
  MVT SVT = VT.getScalarType();

  MVT InVT = In.getValueType().getSimpleVT();
  MVT InSVT = InVT.getScalarType();

  // Check we have a truncation suited for PACKSS/PACKUS.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
    return SDValue();
  if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
    return SDValue();

  // AVX512 has fast truncate, but if the input is already going to be split,
  // there's no harm in trying pack.
  if (Subtarget.hasAVX512() &&
      !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
        InVT.is512BitVector()))
    return SDValue();

  unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Use PACKUS if the input has zero-bits that extend all the way to the
  // packed/truncated value. e.g. masks, zext_in_reg, etc.
  KnownBits Known = DAG.computeKnownBits(In);
  unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
  if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
    return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);

  // Use PACKSS if the input has sign-bits that extend all the way to the
  // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
  if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
    return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);

  return SDValue();
}

// Try to form a MULHU or MULHS node by looking for
// (trunc (srl (mul ext, ext), 16))
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
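//
// Illustrative example (not from the original source):
//   (v8i16 (trunc (srl (mul (zext v8i16 X to v8i32),
//                           (zext v8i16 Y to v8i32)), 16)))
//   -> (v8i16 (mulhu X, Y))
// and likewise with sign_extend and mulhs.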
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
                            SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // First instruction should be a right shift of a multiply.
  if (Src.getOpcode() != ISD::SRL ||
      Src.getOperand(0).getOpcode() != ISD::MUL)
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Only handle vXi16 types that are at least 128-bits unless they will be
  // widened.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
    return SDValue();

  // Input type should be vXi32.
  EVT InVT = Src.getValueType();
  if (InVT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = Src.getOperand(0).getOperand(0);
  SDValue RHS = Src.getOperand(0).getOperand(1);

  unsigned ExtOpc = LHS.getOpcode();
  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
      RHS.getOpcode() != ExtOpc)
    return SDValue();

  // Peek through the extends.
  LHS = LHS.getOperand(0);
  RHS = RHS.getOperand(0);

  // Ensure the input types match.
  if (LHS.getValueType() != VT || RHS.getValueType() != VT)
    return SDValue();

  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
  return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16-bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
//                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget,
                               const SDLoc &DL) {
  if (!VT.isVector() || !Subtarget.hasSSSE3())
    return SDValue();

  unsigned NumElems = VT.getVectorNumElements();
  EVT ScalarVT = VT.getVectorElementType();
  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
    return SDValue();

  SDValue SSatVal = detectSSatPattern(In, VT);
  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
    return SDValue();

  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
  // of multiplies from even/odd elements.
  SDValue N0 = SSatVal.getOperand(0);
  SDValue N1 = SSatVal.getOperand(1);

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
  // Canonicalize zero_extend to LHS.
  if (N01.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N00, N01);
  if (N11.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N10, N11);

  // Ensure we have a zero_extend and a sign_extend.
  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::ZERO_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Ensure the extend is from vXi8.
  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
      N01.getValueType().getVectorElementType() != MVT::i8 ||
      N10.getValueType().getVectorElementType() != MVT::i8 ||
      N11.getValueType().getVectorElementType() != MVT::i8)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // N00/N10 are zero extended. N01/N11 are sign extended.

  // For each element, we need to ensure we have an odd element from one vector
  // multiplied by the odd element of another vector and the even element from
  // one of the same vectors being multiplied by the even element from the
  // other vector. So we need to make sure for each element i, this operator
  // is being performed:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue ZExtIn, SExtIn;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even element. N1 indices must be the next odd
    // element.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);
    // The first time we find an input, capture it.
    if (!ZExtIn) {
      ZExtIn = N00In;
      SExtIn = N01In;
    }
    if (ZExtIn != N00In || SExtIn != N01In ||
        ZExtIn != N10In || SExtIn != N11In)
      return SDValue();
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT InVT = Ops[0].getValueType();
    assert(InVT.getScalarType() == MVT::i8 &&
           "Unexpected scalar element type");
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                 InVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
                          PMADDBuilder);
}
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // Attempt to pre-truncate inputs to arithmetic ops instead.
  if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
    return V;

  // Try to detect AVG pattern first.
  if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
    return Avg;

  // Try to detect PMADDUBSW.
  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
    return PMAdd;

  // Try to combine truncation with signed/unsigned saturation.
  if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
    return Val;

  // Try to combine PMULHUW/PMULHW for vXi16.
  if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
    return V;

  // The bitcast source is a direct mmx result.
  // Detect bitcasts between i32 and x86mmx.
  if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
  }

  // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
    return V;

  return combineVectorTruncation(N, DAG, Subtarget);
}

static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  SDLoc DL(N);

  if (auto SSatVal = detectSSatPattern(In, VT))
    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
  if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);

  return SDValue();
}

/// Returns the negated value if the node \p N flips sign of FP value.
///
/// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
/// or FSUB(0, x).
/// AVX512F does not have FXOR, so FNEG is lowered as
/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
/// In this case we go through all bitcasts.
/// This also recognizes splat of a negated value and returns the splat of that
/// value.
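///
/// Illustrative example (not from the original source): for v4f32,
///   (v4f32 bitcast (xor (v4i32 bitcast X), splat 0x80000000))
/// is recognized as a negation, and X is returned.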
41342 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
41343 if (N->getOpcode() == ISD::FNEG)
41344 return N->getOperand(0);
41346 // Don't recurse exponentially.
41347 if (Depth > SelectionDAG::MaxRecursionDepth)
41350 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
41352 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
41353 EVT VT = Op->getValueType(0);
41354 // Make sure the element size does't change.
41355 if (VT.getScalarSizeInBits() != ScalarSize)
41358 if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) {
41359 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
41360 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
41361 if (!SVOp->getOperand(1).isUndef())
41363 if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode(), Depth + 1))
41364 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
41365 return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT),
41369 unsigned Opc = Op.getOpcode();
41370 if (Opc == ISD::INSERT_VECTOR_ELT) {
41371 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
41373 SDValue InsVector = Op.getOperand(0);
41374 SDValue InsVal = Op.getOperand(1);
41375 if (!InsVector.isUndef())
41377 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
41378 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
41379 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
41380 NegInsVal, Op.getOperand(2));
41384 if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB)
41387 SDValue Op1 = Op.getOperand(1);
41388 SDValue Op0 = Op.getOperand(0);
41390 // For XOR and FXOR, we want to check if constant bits of Op1 are sign bit
41391 // masks. For FSUB, we have to check if constant bits of Op0 are sign bit
41392 // masks and hence we swap the operands.
41393 if (Opc == ISD::FSUB)
41394 std::swap(Op0, Op1);
41397 SmallVector<APInt, 16> EltBits;
41398 // Extract constant bits and see if they are all sign bit masks. Ignore the
41400 if (getTargetConstantBitsFromNode(Op1, ScalarSize,
41401 UndefElts, EltBits,
41402 /* AllowWholeUndefs */ true,
41403 /* AllowPartialUndefs */ false)) {
41404 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
41405 if (!UndefElts[I] && !EltBits[I].isSignMask())
41408 return peekThroughBitcasts(Op0);
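/// Given an FMA-family \p Opcode, return the equivalent opcode after negating
/// the multiplication result (\p NegMul), the accumulator (\p NegAcc), and/or
/// the overall result (\p NegRes).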
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
                                bool NegRes) {
  if (NegMul) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:            Opcode = X86ISD::FNMADD;     break;
    case X86ISD::FMADD_RND:   Opcode = X86ISD::FNMADD_RND; break;
    case X86ISD::FMSUB:       Opcode = X86ISD::FNMSUB;     break;
    case X86ISD::FMSUB_RND:   Opcode = X86ISD::FNMSUB_RND; break;
    case X86ISD::FNMADD:      Opcode = ISD::FMA;           break;
    case X86ISD::FNMADD_RND:  Opcode = X86ISD::FMADD_RND;  break;
    case X86ISD::FNMSUB:      Opcode = X86ISD::FMSUB;      break;
    case X86ISD::FNMSUB_RND:  Opcode = X86ISD::FMSUB_RND;  break;
    }
  }

  if (NegAcc) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:              Opcode = X86ISD::FMSUB;        break;
    case X86ISD::FMADD_RND:     Opcode = X86ISD::FMSUB_RND;    break;
    case X86ISD::FMSUB:         Opcode = ISD::FMA;             break;
    case X86ISD::FMSUB_RND:     Opcode = X86ISD::FMADD_RND;    break;
    case X86ISD::FNMADD:        Opcode = X86ISD::FNMSUB;       break;
    case X86ISD::FNMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
    case X86ISD::FNMSUB:        Opcode = X86ISD::FNMADD;       break;
    case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
    case X86ISD::FMADDSUB:      Opcode = X86ISD::FMSUBADD;     break;
    case X86ISD::FMADDSUB_RND:  Opcode = X86ISD::FMSUBADD_RND; break;
    case X86ISD::FMSUBADD:      Opcode = X86ISD::FMADDSUB;     break;
    case X86ISD::FMSUBADD_RND:  Opcode = X86ISD::FMADDSUB_RND; break;
    }
  }

  if (NegRes) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:            Opcode = X86ISD::FNMSUB;     break;
    case X86ISD::FMADD_RND:   Opcode = X86ISD::FNMSUB_RND; break;
    case X86ISD::FMSUB:       Opcode = X86ISD::FNMADD;     break;
    case X86ISD::FMSUB_RND:   Opcode = X86ISD::FNMADD_RND; break;
    case X86ISD::FNMADD:      Opcode = X86ISD::FMSUB;      break;
    case X86ISD::FNMADD_RND:  Opcode = X86ISD::FMSUB_RND;  break;
    case X86ISD::FNMSUB:      Opcode = ISD::FMA;           break;
    case X86ISD::FNMSUB_RND:  Opcode = X86ISD::FMADD_RND;  break;
    }
  }

  return Opcode;
}

/// Do target-specific dag combines on floating point negations.
static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  EVT OrigVT = N->getValueType(0);
  SDValue Arg = isFNEG(DAG, N);
  if (!Arg)
    return SDValue();

  EVT VT = Arg.getValueType();
  EVT SVT = VT.getScalarType();
  SDLoc DL(N);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // If we're negating an FMUL node on a target with FMA, then we can avoid the
  // use of a constant by performing (-0 - A*B) instead.
  // FIXME: Check rounding control flags as well once it becomes available.
  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
      Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
    SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
                                  Arg.getOperand(1), Zero);
    return DAG.getBitcast(OrigVT, NewNode);
  }

  // If we're negating an FMA node, then we can adjust the
  // instruction to include the extra negation.
  if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
    switch (Arg.getOpcode()) {
    case ISD::FMA:
    case X86ISD::FMSUB:
    case X86ISD::FNMADD:
    case X86ISD::FNMSUB:
    case X86ISD::FMADD_RND:
    case X86ISD::FMSUB_RND:
    case X86ISD::FNMADD_RND:
    case X86ISD::FNMSUB_RND: {
      // We can't handle scalar intrinsic node here because it would only
      // invert one element and not the whole vector. But we could try to handle
      // a negation of the lower element only.
      unsigned NewOpcode = negateFMAOpcode(Arg.getOpcode(), false, false, true);
      return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, Arg->ops()));
    }
    }
  }

  return SDValue();
}

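/// X86 override of TargetLowering::isNegatibleForFree. fneg patterns are
/// removable for free, and FMA variants can absorb operand negations.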
char X86TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
                                           bool LegalOperations,
                                           bool ForCodeSize,
                                           unsigned Depth) const {
  // fneg patterns are removable even if they have multiple uses.
  if (isFNEG(DAG, Op.getNode(), Depth))
    return 2;

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return 0;

  EVT VT = Op.getValueType();
  EVT SVT = VT.getScalarType();
  switch (Op.getOpcode()) {
  case ISD::FMA:
  case X86ISD::FMSUB:
  case X86ISD::FNMADD:
  case X86ISD::FNMSUB:
  case X86ISD::FMADD_RND:
  case X86ISD::FMSUB_RND:
  case X86ISD::FNMADD_RND:
  case X86ISD::FNMSUB_RND: {
    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
      break;

    // This is always negatible for free but we might be able to remove some
    // extra operand negations as well.
    for (int i = 0; i != 3; ++i) {
      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
                                  ForCodeSize, Depth + 1);
      if (V == 2)
        return V;
    }
    return 1;
  }
  }

  return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
                                            ForCodeSize, Depth);
}

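/// X86 override of TargetLowering::getNegatedExpression. Strips removable
/// fneg patterns and folds operand negations into the matching FMA opcode.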
SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOperations,
                                                bool ForCodeSize,
                                                unsigned Depth) const {
  // fneg patterns are removable even if they have multiple uses.
  if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth))
    return DAG.getBitcast(Op.getValueType(), Arg);

  EVT VT = Op.getValueType();
  EVT SVT = VT.getScalarType();
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::FMA:
  case X86ISD::FMSUB:
  case X86ISD::FNMADD:
  case X86ISD::FNMSUB:
  case X86ISD::FMADD_RND:
  case X86ISD::FMSUB_RND:
  case X86ISD::FNMADD_RND:
  case X86ISD::FNMSUB_RND: {
    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
      break;

    // This is always negatible for free but we might be able to remove some
    // extra operand negations as well.
    SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
    for (int i = 0; i != 3; ++i) {
      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
                                  ForCodeSize, Depth + 1);
      if (V == 2)
        NewOps[i] = getNegatedExpression(Op.getOperand(i), DAG, LegalOperations,
                                         ForCodeSize, Depth + 1);
    }

    bool NegA = !!NewOps[0];
    bool NegB = !!NewOps[1];
    bool NegC = !!NewOps[2];
    unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);

    // Fill in the non-negated ops with the original values.
    for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
      if (!NewOps[i])
        NewOps[i] = Op.getOperand(i);
    return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
  }
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
                                              ForCodeSize, Depth);
}

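/// Rewrite an X86 FP logic op (FOR/FXOR/FAND/FANDN) as the equivalent integer
/// vector logic op when integer vector types are available.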
static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  // If we have integer vector types available, use the integer opcodes.
  if (!VT.isVector() || !Subtarget.hasSSE2())
    return SDValue();

  SDLoc dl(N);

  unsigned IntBits = VT.getScalarSizeInBits();
  MVT IntSVT = MVT::getIntegerVT(IntBits);
  MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);

  SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
  SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
  unsigned IntOpcode;
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected FP logic op");
  case X86ISD::FOR:   IntOpcode = ISD::OR;       break;
  case X86ISD::FXOR:  IntOpcode = ISD::XOR;      break;
  case X86ISD::FAND:  IntOpcode = ISD::AND;      break;
  case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
  }
  SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
  return DAG.getBitcast(VT, IntOp);
}

/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() != ISD::XOR)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
    return SDValue();

  X86::CondCode NewCC = X86::GetOppositeBranchCondition(
      X86::CondCode(LHS->getConstantOperandVal(0)));
  SDLoc DL(N);
  return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
}

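/// Do target-specific dag combines on ISD::XOR nodes.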
static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  // If this is SSE1 only, convert to FXOR to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
      N->getValueType(0) == MVT::v4i32) {
    return DAG.getBitcast(
        MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
  }

  if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
    return Cmp;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue SetCC = foldXor1SetCC(N, DAG))
    return SetCC;

  if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
    return RV;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  return combineFneg(N, DAG, Subtarget);
}

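/// Do target-specific dag combines on X86ISD::BEXTR nodes. Only the bottom
/// 16 bits of the control operand are demanded.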
static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  unsigned NumBits = VT.getSizeInBits();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // TODO - Constant Folding.
  if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
    // Reduce Cst1 to the bottom 16-bits.
    // NOTE: SimplifyDemandedBits won't do this for constants.
    const APInt &Val1 = Cst1->getAPIntValue();
    APInt MaskedVal1 = Val1 & 0xFFFF;
    if (MaskedVal1 != Val1)
      return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
                         DAG.getConstant(MaskedVal1, SDLoc(N), VT));
  }

  // Only bottom 16-bits of the control bits are required.
  APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
  if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static bool isNullFPScalarOrVectorConst(SDValue V) {
  return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
}

/// If a value is a scalar FP zero or a vector FP zero (potentially including
/// undefined elements), return a zero constant that may be used to fold away
/// that value. In the case of a vector, the returned constant will not contain
/// undefined elements even if the input parameter does. This makes it suitable
/// to be used as a replacement operand with operations (eg, bitwise-and) where
/// an undef should not propagate.
static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  if (!isNullFPScalarOrVectorConst(V))
    return SDValue();

  if (V.getValueType().isVector())
    return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));

  return V;
}

static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
  if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
        (VT == MVT::f64 && Subtarget.hasSSE2()) ||
        (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
    return SDValue();

  auto isAllOnesConstantFP = [](SDValue V) {
    if (V.getSimpleValueType().isVector())
      return ISD::isBuildVectorAllOnes(V.getNode());
    auto *C = dyn_cast<ConstantFPSDNode>(V);
    return C && C->getConstantFPValue()->isAllOnesValue();
  };

  // fand (fxor X, -1), Y --> fandn X, Y
  if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
    return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);

  // fand X, (fxor Y, -1) --> fandn Y, X
  if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
    return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  // FAND(0.0, x) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
    return V;

  // FAND(x, 0.0) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
    return V;

  if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
    return V;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
  // FANDN(0.0, x) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
    return N->getOperand(1);

  // FANDN(x, 0.0) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
    return V;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
                          const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);

  // F[X]OR(0.0, x) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
    return N->getOperand(1);

  // F[X]OR(x, 0.0) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(1)))
    return N->getOperand(0);

  if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
    return NewVal;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);

  // Only perform optimizations if UnsafeMath is used.
  if (!DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
  // into FMINC and FMAXC, which are commutative operations.
  unsigned NewOp = 0;
  switch (N->getOpcode()) {
  default: llvm_unreachable("unknown opcode");
  case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
  case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
  }

  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}

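/// Lower ISD::FMAXNUM/FMINNUM to X86ISD::FMAX/FMIN, adding the compare and
/// select needed to match the IEEE NaN-handling semantics where required.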
static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (Subtarget.useSoftFloat())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT VT = N->getValueType(0);
  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
        (Subtarget.hasSSE2() && VT == MVT::f64) ||
        (VT.isVector() && TLI.isTypeLegal(VT))))
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc DL(N);
  auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;

  // If we don't have to respect NaN inputs, this is a direct translation to x86
  // min/max instructions.
  if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());

  // If one of the operands is known non-NaN use the native min/max instructions
  // with the non-NaN input as second operand.
  if (DAG.isKnownNeverNaN(Op1))
    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
  if (DAG.isKnownNeverNaN(Op0))
    return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());

  // If we have to respect NaN inputs, this takes at least 3 instructions.
  // Favor a library call when operating on a scalar and minimizing code size.
  if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                         VT);

  // There are 4 possibilities involving NaN inputs, and these are the required
  // outputs:
  //                    Op1
  //                Num     NaN
  //             ----------------
  //        Num  |  Max  |  Op0 |
  //   Op0       ----------------
  //        NaN  |  Op1  |  NaN |
  //             ----------------
  //
  // The SSE FP max/min instructions were not designed for this case, but rather
  // to implement:
  //   Min = Op1 < Op0 ? Op1 : Op0
  //   Max = Op1 > Op0 ? Op1 : Op0
  //
  // So they always return Op0 if either input is a NaN. However, we can still
  // use those instructions for fmaxnum by selecting away a NaN input.

  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
  SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);

  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
  // are NaN, the NaN value of Op1 is the result.
  return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
}

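/// Simplify the vector elements demanded from an X86 int-to-fp conversion and
/// shrink a full vector load feeding it into a vzload when not all bits are
/// needed.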
static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  APInt KnownUndef, KnownZero;
  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  // Convert a full vector load into vzload when not all bits are needed.
  SDValue In = N->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    // Unless the load is volatile or atomic.
    if (LN->isSimple()) {
      SDLoc dl(N);
      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
      MVT MemVT = MVT::getIntegerVT(NumBits);
      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
                                  LN->getPointerInfo(),
                                  LN->getAlignment(),
                                  LN->getMemOperand()->getFlags());
      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                    DAG.getBitcast(InVT, VZLoad));
      DCI.CombineTo(N, Convert);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

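/// Shrink a full vector load feeding an fp-to-int conversion into a vzload
/// when not all bits are needed.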
static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  // Convert a full vector load into vzload when not all bits are needed.
  SDValue In = N->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    // Unless the load is volatile or atomic.
    if (LN->isSimple()) {
      SDLoc dl(N);
      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
      MVT MemVT = MVT::getFloatingPointVT(NumBits);
      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue VZLoad =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
                                  LN->getPointerInfo(),
                                  LN->getAlignment(),
                                  LN->getMemOperand()->getFlags());
      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                    DAG.getBitcast(InVT, VZLoad));
      DCI.CombineTo(N, Convert);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

/// Do target-specific dag combines on X86ISD::ANDNP nodes.
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);

  // ANDNP(0, x) -> x
  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
    return N->getOperand(1);

  // ANDNP(x, 0) -> 0
  if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Turn ANDNP back to AND if input is inverted.
  if (SDValue Not = IsNOT(N->getOperand(0), DAG))
    return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
                       N->getOperand(1));

  // Attempt to recursively combine a bitmask ANDNP with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}

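/// Do target-specific dag combines on X86ISD::BT nodes. BT ignores the high
/// bits of the bit index, so only the low bits are demanded.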
static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // BT ignores high bits in the bit index operand.
  unsigned BitWidth = N1.getValueSizeInBits();
  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
  if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
    return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);

  return SDValue();
}

// Try to combine sext_in_reg of a cmov of constants by extending the constants.
static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT DstVT = N->getValueType(0);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();

  if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
    return SDValue();

  // Look through single use any_extends / truncs.
  SDValue IntermediateBitwidthOp;
  if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
      N0.hasOneUse()) {
    IntermediateBitwidthOp = N0;
    N0 = N0.getOperand(0);
  }

  // See if we have a single use cmov.
  if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
    return SDValue();

  SDValue CMovOp0 = N0.getOperand(0);
  SDValue CMovOp1 = N0.getOperand(1);

  // Make sure both operands are constants.
  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  SDLoc DL(N);

  // If we looked through an any_extend/trunc above, apply the same operation
  // to the constants.
  if (IntermediateBitwidthOp) {
    unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
    CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
    CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
  }

  CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
  CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);

  EVT CMovVT = DstVT;
  // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
  if (DstVT == MVT::i16) {
    CMovVT = MVT::i32;
    CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
    CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
  }

  SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
                             N0.getOperand(2), N0.getOperand(3));

  if (CMovVT != DstVT)
    CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);

  return CMov;
}

static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  if (SDValue V = combineSextInRegCmov(N, DAG))
    return V;

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
  // since there is no sign-extended shift right operation on a vector with
  // 64-bit elements.
  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
                           N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2,
    // it may be replaced with X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }

  return SDValue();
}

/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
/// opportunities to combine math ops, use an LEA, or use a complex addressing
/// mode. This can eliminate extend, add, and shift instructions.
static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
      Ext->getOpcode() != ISD::ZERO_EXTEND)
    return SDValue();

  // TODO: This should be valid for other integer types.
  EVT VT = Ext->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue Add = Ext->getOperand(0);
  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
  bool NSW = Add->getFlags().hasNoSignedWrap();
  bool NUW = Add->getFlags().hasNoUnsignedWrap();

  // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
  // into the 'zext'.
  if ((Sext && !NSW) || (!Sext && !NUW))
    return SDValue();

  // Having a constant operand to the 'add' ensures that we are not increasing
  // the instruction count because the constant is extended for free below.
  // A constant operand can also become the displacement field of an LEA.
  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
  if (!AddOp1)
    return SDValue();

  // Don't make the 'add' bigger if there's no hope of combining it with some
  // other 'add' or 'shl' instruction.
  // TODO: It may be profitable to generate simpler LEA instructions in place
  // of single 'add' instructions, but the cost model for selecting an LEA
  // currently has a high threshold.
  bool HasLEAPotential = false;
  for (auto *User : Ext->uses()) {
    if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
      HasLEAPotential = true;
      break;
    }
  }
  if (!HasLEAPotential)
    return SDValue();

  // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
  int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
  SDValue AddOp0 = Add.getOperand(0);
  SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
  SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);

  // The wider add is guaranteed to not wrap because both operands are
  // sign- or zero-extended.
  SDNodeFlags Flags;
  Flags.setNoSignedWrap(NSW);
  Flags.setNoUnsignedWrap(NUW);
  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
}

// If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
// operands and the result of CMOV is not used anywhere else - promote CMOV
// itself instead of promoting its result. This could be beneficial, because:
//     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
//        (or more) pseudo-CMOVs only when they go one-after-another and
//        getting rid of result extension code after CMOV will help that.
//     2) Promotion of constant CMOV arguments is free, hence the
//        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
//     3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3-byte, so this
//        promotion is also good in terms of code-size.
//        (64-bit CMOV is 4-bytes, that's why we don't do 32-bit => 64-bit
//        promotion.)
static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
  SDValue CMovN = Extend->getOperand(0);
  if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
    return SDValue();

  EVT TargetVT = Extend->getValueType(0);
  unsigned ExtendOpcode = Extend->getOpcode();
  SDLoc DL(Extend);

  EVT VT = CMovN.getValueType();
  SDValue CMovOp0 = CMovN.getOperand(0);
  SDValue CMovOp1 = CMovN.getOperand(1);

  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  // Only extend to i32 or i64.
  if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
    return SDValue();

  // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
  // are free.
  if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
    return SDValue();

  // If this is a zero extend to i64, we should only extend to i32 and use a
  // free zero extend to finish.
  EVT ExtendVT = TargetVT;
  if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
    ExtendVT = MVT::i32;

  CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
  CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);

  SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
                            CMovN.getOperand(2), CMovN.getOperand(3));

  // Finish extending if needed.
  if (ExtendVT != TargetVT)
    Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);

  return Res;
}

// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
// This is more or less the reverse of combineBitcastvxi1.
static SDValue
combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
      Opcode != ISD::ANY_EXTEND)
    return SDValue();
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT InSVT = N0.getValueType().getScalarType();
  unsigned EltSizeInBits = SVT.getSizeInBits();

  // Input type must be extending a bool vector (bit-casted from a scalar
  // integer) to legal integer types.
  if (!VT.isVector())
    return SDValue();
  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
    return SDValue();
  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  EVT SclVT = N0.getOperand(0).getValueType();
  if (!SclVT.isScalarInteger())
    return SDValue();

  SDLoc DL(N);
  SDValue Vec;
  SmallVector<int, 32> ShuffleMask;
  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");

  // Broadcast the scalar integer to the vector elements.
  if (NumElts > EltSizeInBits) {
    // If the scalar integer is greater than the vector element size, then we
    // must split it down into sub-sections for broadcasting. For example:
    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
    assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
    unsigned Scale = NumElts / EltSizeInBits;
    EVT BroadcastVT =
        EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
    Vec = DAG.getBitcast(VT, Vec);

    for (unsigned i = 0; i != Scale; ++i)
      ShuffleMask.append(EltSizeInBits, i);
  } else {
    // For smaller scalar integers, we can simply any-extend it to the vector
    // element size (we don't care about the upper bits) and broadcast it to all
    // elements.
    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
    ShuffleMask.append(NumElts, 0);
  }
  Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);

  // Now, mask the relevant bit in each element.
  SmallVector<SDValue, 32> Bits;
  for (unsigned i = 0; i != NumElts; ++i) {
    int BitIdx = (i % EltSizeInBits);
    APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
    Bits.push_back(DAG.getConstant(Bit, DL, SVT));
  }
  SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
  Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);

  // Compare against the bitmask and extend the result.
  EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
  Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
  Vec = DAG.getSExtOrTrunc(Vec, DL, VT);

  // For SEXT, this is now done, otherwise shift the result down for
  // zero-extension.
  if (Opcode == ISD::SIGN_EXTEND)
    return Vec;
  return DAG.getNode(ISD::SRL, DL, VT, Vec,
                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
}

// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
// result type.
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // Only do this combine with AVX512 for vector extends.
  if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
    return SDValue();

  // Only combine legal element types.
  EVT SVT = VT.getVectorElementType();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
      SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
    return SDValue();

  // We can only do this if the vector size is 256 bits or less.
  unsigned Size = VT.getSizeInBits();
  if (Size > 256)
    return SDValue();

  // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
  // those are the only integer compares we have.
  ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
  if (ISD::isUnsignedIntSetCC(CC))
    return SDValue();

  // Only do this combine if the extension will be fully consumed by the setcc.
  EVT N00VT = N0.getOperand(0).getValueType();
  EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
  if (Size != MatchingVecType.getSizeInBits())
    return SDValue();

  SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);

  if (N->getOpcode() == ISD::ZERO_EXTEND)
    Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());

  return Res;
}

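/// Do target-specific dag combines on ISD::SIGN_EXTEND nodes.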
static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT InVT = N0.getValueType();
  SDLoc DL(N);

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
    return V;

  if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
      isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
    // Invert and sign-extend a boolean is the same as zero-extend and subtract
    // 1 because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
    // lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
    // sext (xor Bool, -1) --> sub (zext Bool), 1
    SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
    return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
  }

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  return SDValue();
}

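/// Do target-specific dag combines on FMA nodes, folding negated operands
/// into the matching negated FMA variant.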
static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
    return SDValue();

  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);
  SDValue C = N->getOperand(2);

  auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
    bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
    bool LegalOperations = !DCI.isBeforeLegalizeOps();
    if (TLI.isNegatibleForFree(V, DAG, LegalOperations, CodeSize) == 2) {
      V = TLI.getNegatedExpression(V, DAG, LegalOperations, CodeSize);
      return true;
    }
    // Look through extract_vector_elts. If it comes from an FNEG, create a
    // new extract from the FNEG input.
    if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        isNullConstant(V.getOperand(1))) {
      SDValue Vec = V.getOperand(0);
      if (TLI.isNegatibleForFree(Vec, DAG, LegalOperations, CodeSize) == 2) {
        SDValue NegVal =
            TLI.getNegatedExpression(Vec, DAG, LegalOperations, CodeSize);
        V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
                        NegVal, V.getOperand(1));
        return true;
      }
    }

    return false;
  };

  // Do not convert the passthru input of scalar intrinsics.
  // FIXME: We could allow negations of the lower element only.
  bool NegA = invertIfNegative(A);
  bool NegB = invertIfNegative(B);
  bool NegC = invertIfNegative(C);

  if (!NegA && !NegB && !NegC)
    return SDValue();

  unsigned NewOpcode =
      negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);

  if (N->getNumOperands() == 4)
    return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, A, B, C);
}

// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
// Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOperations = !DCI.isBeforeLegalizeOps();

  SDValue N2 = N->getOperand(2);
  if (TLI.isNegatibleForFree(N2, DAG, LegalOperations, CodeSize) != 2)
    return SDValue();

  SDValue NegN2 = TLI.getNegatedExpression(N2, DAG, LegalOperations, CodeSize);
  unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);

  if (N->getNumOperands() == 4)
    return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                       NegN2, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                     NegN2);
}

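/// Do target-specific dag combines on ISD::ZERO_EXTEND nodes.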
static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      if (!isOneConstant(N0.getOperand(1)))
        return SDValue();
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, dl, VT));
    }
  }

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (DCI.isBeforeLegalizeOps())
    if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
      return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
    return R;

  // TODO: Combine with any target/faux shuffle.
  if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
      VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
    APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
    if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
        (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
      return concatSubVectors(N00, N01, DAG, dl);
    }
  }

  return SDValue();
}

/// Try to map a 128-bit or larger integer comparison to vector instructions
/// before type legalization splits it up into chunks.
static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
  assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");

  // We're looking for an oversized integer equality comparison.
  SDValue X = SetCC->getOperand(0);
  SDValue Y = SetCC->getOperand(1);
  EVT OpVT = X.getValueType();
  unsigned OpSize = OpVT.getSizeInBits();
  if (!OpVT.isScalarInteger() || OpSize < 128)
    return SDValue();

  // Ignore a comparison with zero because that gets special treatment in
  // EmitTest(). But make an exception for the special case of a pair of
  // logically-combined vector-sized operands compared to zero. This pattern may
  // be generated by the memcmp expansion pass with oversized integer compares.
  bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
                          X.getOperand(0).getOpcode() == ISD::XOR &&
                          X.getOperand(1).getOpcode() == ISD::XOR;
  if (isNullConstant(Y) && !IsOrXorXorCCZero)
    return SDValue();

  // Don't perform this combine if constructing the vector will be expensive.
  auto IsVectorBitCastCheap = [](SDValue X) {
    X = peekThroughBitcasts(X);
    return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
           X.getOpcode() == ISD::LOAD;
  };
  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
      !IsOrXorXorCCZero)
    return SDValue();

  EVT VT = SetCC->getValueType(0);
  SDLoc DL(SetCC);
  bool HasAVX = Subtarget.hasAVX();

  // Use XOR (plus OR) and PTEST after SSE4.1 and before AVX512.
  // Otherwise use PCMPEQ (plus AND) and mask testing.
  if ((OpSize == 128 && Subtarget.hasSSE2()) ||
      (OpSize == 256 && HasAVX) ||
      (OpSize == 512 && Subtarget.useAVX512Regs())) {
    bool HasPT = Subtarget.hasSSE41();
    EVT VecVT = MVT::v16i8;
    EVT CmpVT = MVT::v16i8;
    if (OpSize == 256)
      VecVT = CmpVT = MVT::v32i8;
    if (OpSize == 512) {
      if (Subtarget.hasBWI()) {
        VecVT = MVT::v64i8;
        CmpVT = MVT::v64i1;
      } else {
        VecVT = MVT::v16i32;
        CmpVT = MVT::v16i1;
      }
    }

    SDValue Cmp;
    if (IsOrXorXorCCZero) {
      // This is a bitwise-combined equality comparison of 2 pairs of vectors:
      // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
      // Use 2 vector equality compares and 'and' the results before doing a
      // MOVMSK.
      SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
      SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
      SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
      SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
      if (VecVT == CmpVT && HasPT) {
        SDValue Cmp1 = DAG.getNode(ISD::XOR, DL, VecVT, A, B);
        SDValue Cmp2 = DAG.getNode(ISD::XOR, DL, VecVT, C, D);
        Cmp = DAG.getNode(ISD::OR, DL, VecVT, Cmp1, Cmp2);
      } else {
        SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
        SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
        Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
      }
    } else {
      SDValue VecX = DAG.getBitcast(VecVT, X);
      SDValue VecY = DAG.getBitcast(VecVT, Y);
      if (VecVT == CmpVT && HasPT) {
        Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
      } else {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
      }
    }
    // For 512-bits we want to emit a setcc that will lower to kortest.
    if (VecVT != CmpVT) {
      EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 : MVT::i16;
      SDValue Mask = DAG.getAllOnesConstant(DL, KRegVT);
      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp), Mask, CC);
    }
    if (HasPT) {
      SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
                                     Cmp);
      SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
      X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
      SDValue SetCC = getSETCC(X86CC, PT, DL, DAG);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, SetCC.getValue(0));
    }
    // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
    // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
    // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
    // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
    // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
    SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
    SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
                                    MVT::i32);
    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
  }

  return SDValue();
}

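/// Do target-specific dag combines on ISD::SETCC nodes.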
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT OpVT = LHS.getValueType();
  SDLoc DL(N);

  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    // 0-x == y --> x+y == 0
    // 0-x != y --> x+y != 0
    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
        LHS.hasOneUse()) {
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }
    // x == 0-y --> x+y == 0
    // x != 0-y --> x+y != 0
    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
        RHS.hasOneUse()) {
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }

    if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
      return V;
  }

  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
    // Put build_vectors on the right.
    if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
      std::swap(LHS, RHS);
      CC = ISD::getSetCCSwappedOperands(CC);
    }

    bool IsSEXT0 =
        (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
        (LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
    bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());

    if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETGT)
        return DAG.getConstant(0, DL, VT);
      if (CC == ISD::SETLE)
        return DAG.getConstant(1, DL, VT);
      if (CC == ISD::SETEQ || CC == ISD::SETGE)
        return DAG.getNOT(DL, LHS.getOperand(0), VT);

      assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
             "Unexpected condition code!");
      return LHS.getOperand(0);
    }
  }

  // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
  // pre-promote its result type since vXi1 vectors don't get promoted
  // during type legalization.
  // NOTE: The element count check is to ignore operand types that need to
  // go through type promotion to a 128-bit vector.
  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1 &&
      (OpVT.getVectorElementType() == MVT::i8 ||
       OpVT.getVectorElementType() == MVT::i16)) {
    SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
                                N->getOperand(2));
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
  }

  // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
  // to avoid scalarization via legalization because v4i32 is not a legal type.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
      LHS.getValueType() == MVT::v4f32)
    return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);

  return SDValue();
}

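/// Do target-specific dag combines on X86ISD::MOVMSK nodes: constant fold,
/// look through width-preserving bitcasts, and fold movmsk(not(x)) into
/// not(movmsk(x)).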
static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDValue Src = N->getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = N->getSimpleValueType(0);
  unsigned NumBits = VT.getScalarSizeInBits();
  unsigned NumElts = SrcVT.getVectorNumElements();

  // Perform constant folding.
  if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
    assert(VT == MVT::i32 && "Unexpected result type");
    APInt Imm(32, 0);
    for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
      if (!Src.getOperand(Idx).isUndef() &&
          Src.getConstantOperandAPInt(Idx).isNegative())
        Imm.setBit(Idx);
    }
    return DAG.getConstant(Imm, SDLoc(N), VT);
  }

  // Look through int->fp bitcasts that don't change the element width.
  unsigned EltWidth = SrcVT.getScalarSizeInBits();
  if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
      Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
    return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));

  // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
  // with scalar comparisons.
  if (SDValue NotSrc = IsNOT(Src, DAG)) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
    NotSrc = DAG.getBitcast(SrcVT, NotSrc);
    return DAG.getNode(ISD::XOR, DL, VT,
                       DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
                       DAG.getConstant(NotMask, DL, VT));
  }

  // Simplify the inputs.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedMask(APInt::getAllOnesValue(NumBits));
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  // With vector masks we only demand the upper bit of the mask.
  SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  return SDValue();
}

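/// Do target-specific dag combines on masked gather/scatter nodes:
/// canonicalize the index type to i32/i64, narrow over-wide indices, and
/// demand only the sign bit of vector masks.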
static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc DL(N);
  auto *GorS = cast<MaskedGatherScatterSDNode>(N);
  SDValue Chain = GorS->getChain();
  SDValue Index = GorS->getIndex();
  SDValue Mask = GorS->getMask();
  SDValue Base = GorS->getBasePtr();
  SDValue Scale = GorS->getScale();

  if (DCI.isBeforeLegalize()) {
    unsigned IndexWidth = Index.getScalarValueSizeInBits();

    // Shrink constant indices if they are larger than 32-bits.
    // Only do this before legalize types since v2i64 could become v2i32.
    // FIXME: We could check that the type is legal if we're after legalize
    // types, but then we would need to construct test cases where that happens.
    // FIXME: We could support more than just constant vectors, but we need to
    // be careful with costing. A truncate that can be optimized out would be
    // fine. Otherwise we might only want to create a truncate if it avoids a
    // split.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
      if (BV->isConstant() && IndexWidth > 32 &&
          DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
        unsigned NumElts = Index.getValueType().getVectorNumElements();
        EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
        if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
          SDValue Ops[] = { Chain, Gather->getPassThru(),
                            Mask, Base, Index, Scale };
          return DAG.getMaskedGather(Gather->getVTList(),
                                     Gather->getMemoryVT(), DL, Ops,
                                     Gather->getMemOperand(),
                                     Gather->getIndexType());
        }
        auto *Scatter = cast<MaskedScatterSDNode>(GorS);
        SDValue Ops[] = { Chain, Scatter->getValue(),
                          Mask, Base, Index, Scale };
        return DAG.getMaskedScatter(Scatter->getVTList(),
                                    Scatter->getMemoryVT(), DL,
                                    Ops, Scatter->getMemOperand(),
                                    Scatter->getIndexType());
      }
    }

    // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
    // there are sufficient sign bits. Only do this before legalize types to
    // avoid creating illegal types in truncate.
    if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
         Index.getOpcode() == ISD::ZERO_EXTEND) &&
        IndexWidth > 32 &&
        Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
        DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
      unsigned NumElts = Index.getValueType().getVectorNumElements();
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
      Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
        SDValue Ops[] = { Chain, Gather->getPassThru(),
                          Mask, Base, Index, Scale };
        return DAG.getMaskedGather(Gather->getVTList(),
                                   Gather->getMemoryVT(), DL, Ops,
                                   Gather->getMemOperand(),
                                   Gather->getIndexType());
      }
      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
      SDValue Ops[] = { Chain, Scatter->getValue(),
                        Mask, Base, Index, Scale };
      return DAG.getMaskedScatter(Scatter->getVTList(),
                                  Scatter->getMemoryVT(), DL,
                                  Ops, Scatter->getMemOperand(),
                                  Scatter->getIndexType());
    }
  }

  if (DCI.isBeforeLegalizeOps()) {
    unsigned IndexWidth = Index.getScalarValueSizeInBits();

    // Make sure the index is either i32 or i64.
    if (IndexWidth != 32 && IndexWidth != 64) {
      MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
      EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
                                     Index.getValueType().getVectorNumElements());
      Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
        SDValue Ops[] = { Chain, Gather->getPassThru(),
                          Mask, Base, Index, Scale };
        return DAG.getMaskedGather(Gather->getVTList(),
                                   Gather->getMemoryVT(), DL, Ops,
                                   Gather->getMemOperand(),
                                   Gather->getIndexType());
      }
      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
      SDValue Ops[] = { Chain, Scatter->getValue(),
                        Mask, Base, Index, Scale };
      return DAG.getMaskedScatter(Scatter->getVTList(),
                                  Scatter->getMemoryVT(), DL,
                                  Ops, Scatter->getMemOperand(),
                                  Scatter->getIndexType());
    }
  }

  // With vector masks we only demand the upper bit of the mask.
  if (Mask.getScalarValueSizeInBits() != 1) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
      return SDValue(N, 0);
  }

  return SDValue();
}

42941 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
42942 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
42943 const X86Subtarget &Subtarget) {
42944 SDLoc DL(N);
42945 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
42946 SDValue EFLAGS = N->getOperand(1);
42948 // Try to simplify the EFLAGS and condition code operands.
42949 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
42950 return getSETCC(CC, Flags, DL, DAG);
42952 return SDValue();
42953 }
42955 /// Optimize branch condition evaluation.
42956 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
42957 const X86Subtarget &Subtarget) {
42958 SDLoc DL(N);
42959 SDValue EFLAGS = N->getOperand(3);
42960 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
42962 // Try to simplify the EFLAGS and condition code operands.
42963 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
42964 // RAUW them under us.
42965 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
42966 SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
42967 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
42968 N->getOperand(1), Cond, Flags);
42969 }
42971 return SDValue();
42972 }
42974 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
42975 SelectionDAG &DAG) {
42976 // Take advantage of vector comparisons producing 0 or -1 in each lane to
42977 // optimize away operation when it's from a constant.
42979 // The general transformation is:
42980 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
42981 // AND(VECTOR_CMP(x,y), constant2)
42982 // constant2 = UNARYOP(constant)
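//
// For example, with sint_to_fp as the unary op:
// (v4f32 sint_to_fp (and (setcc ...), <1,1,1,1>))
// --> (v4f32 and (setcc ...), (bitcast <1.0,1.0,1.0,1.0>))
// Each compare lane is 0 or -1, so the AND yields either zero or the
// converted constant directly.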
42984 // Early exit if this isn't a vector operation, the operand of the
42985 // unary operation isn't a bitwise AND, or if the sizes of the operations
42986 // aren't the same.
42987 EVT VT = N->getValueType(0);
42988 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
42989 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
42990 VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
42991 return SDValue();
42993 // Now check that the other operand of the AND is a constant. We could
42994 // make the transformation for non-constant splats as well, but it's unclear
42995 // that would be a benefit as it would not eliminate any operations, just
42996 // perform one more step in scalar code before moving to the vector unit.
42997 if (auto *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(0).getOperand(1))) {
42998 // Bail out if the vector isn't a constant.
42999 if (!BV->isConstant())
43000 return SDValue();
43002 // Everything checks out. Build up the new and improved node.
43003 SDLoc DL(N);
43004 EVT IntVT = BV->getValueType(0);
43005 // Create a new constant of the appropriate type for the transformed
43006 // DAG node.
43007 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
43008 // The AND node needs bitcasts to/from an integer vector type around it.
43009 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
43010 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
43011 N->getOperand(0)->getOperand(0), MaskConst);
43012 SDValue Res = DAG.getBitcast(VT, NewAnd);
43013 return Res;
43014 }
43016 return SDValue();
43017 }
43019 /// If we are converting a value to floating-point, try to replace scalar
43020 /// truncate of an extracted vector element with a bitcast. This tries to keep
43021 /// the sequence on XMM registers rather than moving between vector and GPRs.
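/// For example (illustrative):
/// (f64 sint_to_fp (i32 trunc (i64 extract_elt (v2i64 X), 0)))
/// --> (f64 sint_to_fp (i32 extract_elt (v4i32 bitcast X), 0))
/// On little-endian targets the low i32 of the bitcast holds the same bits
/// as the truncated element, so the value never leaves the XMM unit.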
43022 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
43023 // TODO: This is currently only used by combineSIntToFP, but it is generalized
43024 // to allow being called by any similar cast opcode.
43025 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
43026 SDValue Trunc = N->getOperand(0);
43027 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
43028 return SDValue();
43030 SDValue ExtElt = Trunc.getOperand(0);
43031 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43032 !isNullConstant(ExtElt.getOperand(1)))
43033 return SDValue();
43035 EVT TruncVT = Trunc.getValueType();
43036 EVT SrcVT = ExtElt.getValueType();
43037 unsigned DestWidth = TruncVT.getSizeInBits();
43038 unsigned SrcWidth = SrcVT.getSizeInBits();
43039 if (SrcWidth % DestWidth != 0)
43040 return SDValue();
43042 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
43043 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
43044 unsigned VecWidth = SrcVecVT.getSizeInBits();
43045 unsigned NumElts = VecWidth / DestWidth;
43046 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
43047 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
43048 SDLoc DL(N);
43049 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
43050 BitcastVec, ExtElt.getOperand(1));
43051 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
43052 }
43054 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
43055 const X86Subtarget &Subtarget) {
43056 SDValue Op0 = N->getOperand(0);
43057 EVT VT = N->getValueType(0);
43058 EVT InVT = Op0.getValueType();
43060 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
43061 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
43062 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
43063 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
43064 SDLoc dl(N);
43065 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43066 InVT.getVectorNumElements());
43067 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
43069 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
43070 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
43071 }
43073 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
43074 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
43075 // the optimization here.
43076 if (DAG.SignBitIsZero(Op0))
43077 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
43079 return SDValue();
43080 }
43082 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
43083 TargetLowering::DAGCombinerInfo &DCI,
43084 const X86Subtarget &Subtarget) {
43085 // First try to optimize away the conversion entirely when it's
43086 // conditionally from a constant. Vectors only.
43087 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
43088 return Res;
43090 // Now move on to more general possibilities.
43091 SDValue Op0 = N->getOperand(0);
43092 EVT VT = N->getValueType(0);
43093 EVT InVT = Op0.getValueType();
43095 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
43096 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
43097 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
43098 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
43099 SDLoc dl(N);
43100 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43101 InVT.getVectorNumElements());
43102 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
43103 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
43104 }
43106 // Without AVX512DQ we only support i64 to float scalar conversion. For both
43107 // vectors and scalars, see if we know that the upper bits are all the sign
43108 // bit, in which case we can truncate the input to i32 and convert from that.
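// e.g. (f32 sint_to_fp (i64 sext (i32 X))) --> (f32 sint_to_fp (i32 X)),
// since the upper 32 bits are just copies of the sign bit.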
43109 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
43110 unsigned BitWidth = InVT.getScalarSizeInBits();
43111 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
43112 if (NumSignBits >= (BitWidth - 31)) {
43113 EVT TruncVT = MVT::i32;
43114 if (InVT.isVector())
43115 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
43116 InVT.getVectorNumElements());
43117 SDLoc dl(N);
43118 if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
43119 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
43120 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
43121 }
43122 // If we're after legalize and the type is v2i32 we need to shuffle and
43123 // use CVTSI2P.
43124 assert(InVT == MVT::v2i64 && "Unexpected VT!");
43125 SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
43126 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
43127 { 0, 2, -1, -1 });
43128 return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
43129 }
43130 }
43132 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
43133 // a 32-bit target where SSE doesn't support i64->FP operations.
43134 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
43135 Op0.getOpcode() == ISD::LOAD) {
43136 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
43137 EVT LdVT = Ld->getValueType(0);
43139 // This transformation is not supported if the result type is f16 or f128.
43140 if (VT == MVT::f16 || VT == MVT::f128)
43141 return SDValue();
43143 // If we have AVX512DQ we can use packed conversion instructions unless
43144 // the VT is f80.
43145 if (Subtarget.hasDQI() && VT != MVT::f80)
43146 return SDValue();
43148 if (Ld->isSimple() && !VT.isVector() &&
43149 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
43150 !Subtarget.is64Bit() && LdVT == MVT::i64) {
43151 SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
43152 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
43153 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
43154 return FILDChain;
43155 }
43156 }
43158 if (SDValue V = combineToFPTruncExtElt(N, DAG))
43159 return V;
43161 return SDValue();
43162 }
43164 static bool needCarryOrOverflowFlag(SDValue Flags) {
43165 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
43167 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
43168 UI != UE; ++UI) {
43169 SDNode *User = *UI;
43171 X86::CondCode CC;
43172 switch (User->getOpcode()) {
43173 default:
43174 // Be conservative.
43175 return true;
43176 case X86ISD::SETCC:
43177 case X86ISD::SETCC_CARRY:
43178 CC = (X86::CondCode)User->getConstantOperandVal(0);
43179 break;
43180 case X86ISD::BRCOND:
43181 CC = (X86::CondCode)User->getConstantOperandVal(2);
43182 break;
43183 case X86ISD::CMOV:
43184 CC = (X86::CondCode)User->getConstantOperandVal(2);
43185 break;
43186 }
43188 switch (CC) {
43189 default: break;
43190 case X86::COND_A: case X86::COND_AE:
43191 case X86::COND_B: case X86::COND_BE:
43192 case X86::COND_O: case X86::COND_NO:
43193 case X86::COND_G: case X86::COND_GE:
43194 case X86::COND_L: case X86::COND_LE:
43195 return true;
43196 }
43197 }
43199 return false;
43200 }
43202 static bool onlyZeroFlagUsed(SDValue Flags) {
43203 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
43205 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
43206 UI != UE; ++UI) {
43207 SDNode *User = *UI;
43209 unsigned CCOpNo;
43210 switch (User->getOpcode()) {
43211 default:
43212 // Be conservative.
43213 return false;
43214 case X86ISD::SETCC: CCOpNo = 0; break;
43215 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
43216 case X86ISD::BRCOND: CCOpNo = 2; break;
43217 case X86ISD::CMOV: CCOpNo = 2; break;
43218 }
43220 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
43221 if (CC != X86::COND_E && CC != X86::COND_NE)
43222 return false;
43223 }
43225 return true;
43226 }
43228 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
43229 // Only handle test patterns.
43230 if (!isNullConstant(N->getOperand(1)))
43231 return SDValue();
43233 // If we have a CMP of a truncated binop, see if we can make a smaller binop
43234 // and use its flags directly.
43235 // TODO: Maybe we should try promoting compares that only use the zero flag
43236 // first if we can prove the upper bits with computeKnownBits?
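// For example, (X86ISD::CMP (i8 trunc (i32 add X, Y)), 0) can instead test
// the flag result of a narrow (X86ISD::ADD (trunc X), (trunc Y)), removing
// the truncate altogether.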
43237 SDLoc dl(N);
43238 SDValue Op = N->getOperand(0);
43239 EVT VT = Op.getValueType();
43241 // If we have a constant logical shift that's only used in a comparison
43242 // against zero turn it into an equivalent AND. This allows turning it into
43243 // a TEST instruction later.
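// e.g. (srl X, 5) == 0 is equivalent to (X & 0xFFFFFFE0) == 0 for i32, and
// the AND form can later be matched as a TEST with an immediate.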
43244 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
43245 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
43246 onlyZeroFlagUsed(SDValue(N, 0))) {
43247 unsigned BitWidth = VT.getSizeInBits();
43248 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
43249 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
43250 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
43251 APInt Mask = Op.getOpcode() == ISD::SRL
43252 ? APInt::getHighBitsSet(BitWidth, MaskBits)
43253 : APInt::getLowBitsSet(BitWidth, MaskBits);
43254 if (Mask.isSignedIntN(32)) {
43255 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
43256 DAG.getConstant(Mask, dl, VT));
43257 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
43258 DAG.getConstant(0, dl, VT));
43259 }
43260 }
43261 }
43263 // Look for a truncate with a single use.
43264 if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
43265 return SDValue();
43267 Op = Op.getOperand(0);
43269 // Arithmetic op can only have one use.
43270 if (!Op.hasOneUse())
43271 return SDValue();
43273 unsigned NewOpc;
43274 switch (Op.getOpcode()) {
43275 default: return SDValue();
43276 case ISD::AND:
43277 // Skip and with constant. We have special handling for and with immediate
43278 // during isel to generate test instructions.
43279 if (isa<ConstantSDNode>(Op.getOperand(1)))
43280 return SDValue();
43281 NewOpc = X86ISD::AND;
43282 break;
43283 case ISD::OR: NewOpc = X86ISD::OR; break;
43284 case ISD::XOR: NewOpc = X86ISD::XOR; break;
43285 case ISD::ADD:
43286 // If the carry or overflow flag is used, we can't truncate.
43287 if (needCarryOrOverflowFlag(SDValue(N, 0)))
43288 return SDValue();
43289 NewOpc = X86ISD::ADD;
43290 break;
43291 case ISD::SUB:
43292 // If the carry or overflow flag is used, we can't truncate.
43293 if (needCarryOrOverflowFlag(SDValue(N, 0)))
43294 return SDValue();
43295 NewOpc = X86ISD::SUB;
43296 break;
43297 }
43299 // We found an op we can narrow. Truncate its inputs.
43300 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
43301 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
43303 // Use a X86 specific opcode to avoid DAG combine messing with it.
43304 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43305 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
43307 // For AND, keep a CMP so that we can match the test pattern.
43308 if (NewOpc == X86ISD::AND)
43309 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
43310 DAG.getConstant(0, dl, VT));
43312 // Return the flags.
43313 return Op.getValue(1);
43314 }
43316 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
43317 TargetLowering::DAGCombinerInfo &DCI) {
43318 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
43319 "Expected X86ISD::ADD or X86ISD::SUB");
43321 SDLoc DL(N);
43322 SDValue LHS = N->getOperand(0);
43323 SDValue RHS = N->getOperand(1);
43324 MVT VT = LHS.getSimpleValueType();
43325 unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
43327 // If we don't use the flag result, simplify back to a generic ADD/SUB.
43328 if (!N->hasAnyUseOfValue(1)) {
43329 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
43330 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
43331 }
43333 // Fold any similar generic ADD/SUB opcodes to reuse this node.
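// e.g. if both (X86ISD::SUB X, Y) and (ISD::SUB X, Y) exist, the generic
// node can simply reuse this node's value result; a reversed (ISD::SUB Y, X)
// is rebuilt via the Negate path as 0 - (X - Y).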
43334 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
43335 SDValue Ops[] = {N0, N1};
43336 SDVTList VTs = DAG.getVTList(N->getValueType(0));
43337 if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
43338 SDValue Op(N, 0);
43339 if (Negate)
43340 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
43341 DCI.CombineTo(GenericAddSub, Op);
43342 }
43343 };
43344 MatchGeneric(LHS, RHS, false);
43345 MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
43347 return SDValue();
43348 }
43350 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
43351 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
43352 MVT VT = N->getSimpleValueType(0);
43353 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43354 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
43355 N->getOperand(0), N->getOperand(1),
43356 Flags);
43357 }
43359 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
43360 // iff the flag result is dead.
43361 SDValue Op0 = N->getOperand(0);
43362 SDValue Op1 = N->getOperand(1);
43363 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
43364 !N->hasAnyUseOfValue(1))
43365 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
43366 Op0.getOperand(1), N->getOperand(2));
43368 return SDValue();
43369 }
43371 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
43372 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
43373 TargetLowering::DAGCombinerInfo &DCI) {
43374 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
43375 // the result is either zero or one (depending on the input carry bit).
43376 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
43377 if (X86::isZeroNode(N->getOperand(0)) &&
43378 X86::isZeroNode(N->getOperand(1)) &&
43379 // We don't have a good way to replace an EFLAGS use, so only do this when
43380 // the flag result is dead.
43381 SDValue(N, 1).use_empty()) {
43382 SDLoc DL(N);
43383 EVT VT = N->getValueType(0);
43384 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
43385 SDValue Res1 =
43386 DAG.getNode(ISD::AND, DL, VT,
43387 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43388 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43389 N->getOperand(2)),
43390 DAG.getConstant(1, DL, VT));
43391 return DCI.CombineTo(N, Res1, CarryOut);
43392 }
43394 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
43395 MVT VT = N->getSimpleValueType(0);
43396 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43397 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
43398 N->getOperand(0), N->getOperand(1),
43399 Flags);
43400 }
43402 return SDValue();
43403 }
43405 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
43406 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
43407 /// with CMP+{ADC, SBB}.
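/// For example, 'sub X, (zext (icmp ult A, B))' can lower to CMP A, B
/// followed by SBB X, 0, consuming the carry flag directly instead of
/// materializing the compare result in a register first.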
43408 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
43409 bool IsSub = N->getOpcode() == ISD::SUB;
43410 SDValue X = N->getOperand(0);
43411 SDValue Y = N->getOperand(1);
43413 // If this is an add, canonicalize a zext operand to the RHS.
43414 // TODO: Incomplete? What if both sides are zexts?
43415 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
43416 Y.getOpcode() != ISD::ZERO_EXTEND)
43417 std::swap(X, Y);
43419 // Look through a one-use zext.
43420 bool PeekedThroughZext = false;
43421 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
43422 Y = Y.getOperand(0);
43423 PeekedThroughZext = true;
43424 }
43426 // If this is an add, canonicalize a setcc operand to the RHS.
43427 // TODO: Incomplete? What if both sides are setcc?
43428 // TODO: Should we allow peeking through a zext of the other operand?
43429 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
43430 Y.getOpcode() != X86ISD::SETCC)
43431 std::swap(X, Y);
43433 if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
43434 return SDValue();
43436 SDLoc DL(N);
43437 EVT VT = N->getValueType(0);
43438 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
43440 // If X is -1 or 0, then we have an opportunity to avoid constants required in
43441 // the general case below.
43442 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
43443 if (ConstantX) {
43444 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
43445 (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
43446 // This is a complicated way to get -1 or 0 from the carry flag:
43447 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
43448 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
43449 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43450 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43451 Y.getOperand(1));
43452 }
43454 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
43455 (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
43456 SDValue EFLAGS = Y->getOperand(1);
43457 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
43458 EFLAGS.getValueType().isInteger() &&
43459 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
43460 // Swap the operands of a SUB, and we have the same pattern as above.
43461 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
43462 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
43463 SDValue NewSub = DAG.getNode(
43464 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
43465 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
43466 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
43467 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43468 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43469 NewEFLAGS);
43470 }
43471 }
43472 }
43474 if (CC == X86::COND_B) {
43475 // X + SETB Z --> adc X, 0
43476 // X - SETB Z --> sbb X, 0
43477 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
43478 DAG.getVTList(VT, MVT::i32), X,
43479 DAG.getConstant(0, DL, VT), Y.getOperand(1));
43480 }
43482 if (CC == X86::COND_A) {
43483 SDValue EFLAGS = Y->getOperand(1);
43484 // Try to convert COND_A into COND_B in an attempt to facilitate
43485 // materializing "setb reg".
43487 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
43488 // cannot take an immediate as its first operand.
43490 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
43491 EFLAGS.getValueType().isInteger() &&
43492 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
43493 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
43494 EFLAGS.getNode()->getVTList(),
43495 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
43496 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
43497 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
43498 DAG.getVTList(VT, MVT::i32), X,
43499 DAG.getConstant(0, DL, VT), NewEFLAGS);
43500 }
43501 }
43503 if (CC != X86::COND_E && CC != X86::COND_NE)
43504 return SDValue();
43506 SDValue Cmp = Y.getOperand(1);
43507 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
43508 !X86::isZeroNode(Cmp.getOperand(1)) ||
43509 !Cmp.getOperand(0).getValueType().isInteger())
43510 return SDValue();
43512 SDValue Z = Cmp.getOperand(0);
43513 EVT ZVT = Z.getValueType();
43515 // If X is -1 or 0, then we have an opportunity to avoid constants required in
43516 // the general case below.
43517 if (ConstantX) {
43518 // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
43519 // fake operands:
43520 // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
43521 // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
43522 if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
43523 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
43524 SDValue Zero = DAG.getConstant(0, DL, ZVT);
43525 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
43526 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
43527 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43528 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43529 SDValue(Neg.getNode(), 1));
43530 }
43532 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
43533 // with fake operands:
43534 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
43535 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
43536 if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
43537 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
43538 SDValue One = DAG.getConstant(1, DL, ZVT);
43539 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
43540 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43541 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
43542 }
43543 }
43545 // (cmp Z, 1) sets the carry flag if Z is 0.
43546 SDValue One = DAG.getConstant(1, DL, ZVT);
43547 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
43549 // Add the flags type for ADC/SBB nodes.
43550 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43552 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
43553 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
43554 if (CC == X86::COND_NE)
43555 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
43556 DAG.getConstant(-1ULL, DL, VT), Cmp1);
43558 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
43559 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
43560 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
43561 DAG.getConstant(0, DL, VT), Cmp1);
43562 }
43564 static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
43565 const X86Subtarget &Subtarget) {
43566 if (!Subtarget.hasSSE2())
43567 return SDValue();
43569 EVT VT = N->getValueType(0);
43571 // If the vector size is less than 128, or greater than the supported RegSize,
43572 // do not use PMADD.
43573 if (!VT.isVector() || VT.getVectorNumElements() < 8)
43574 return SDValue();
43576 SDValue Op0 = N->getOperand(0);
43577 SDValue Op1 = N->getOperand(1);
43579 auto UsePMADDWD = [&](SDValue Op) {
43580 ShrinkMode Mode;
43581 return Op.getOpcode() == ISD::MUL &&
43582 canReduceVMulWidth(Op.getNode(), DAG, Mode) && Mode != MULU16 &&
43583 (!Subtarget.hasSSE41() ||
43584 (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
43585 Op->isOnlyUserOf(Op.getOperand(1).getNode())));
43586 };
43588 SDValue MulOp, OtherOp;
43589 if (UsePMADDWD(Op0)) {
43590 MulOp = Op0;
43591 OtherOp = Op1;
43592 } else if (UsePMADDWD(Op1)) {
43593 MulOp = Op1;
43594 OtherOp = Op0;
43595 } else
43596 return SDValue();
43598 SDLoc DL(N);
43599 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43600 VT.getVectorNumElements());
43601 EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43602 VT.getVectorNumElements() / 2);
43604 // Shrink the operands of mul.
43605 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
43606 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));
43608 // Madd vector size is half of the original vector size
43609 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43610 ArrayRef<SDValue> Ops) {
43611 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
43612 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
43613 };
43614 SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
43615 PMADDWDBuilder);
43616 // Fill the rest of the output with 0
43617 SDValue Zero = DAG.getConstant(0, DL, Madd.getSimpleValueType());
43618 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);
43620 // Preserve the reduction flag on the ADD. We may need to revisit for the
43621 // other operand.
43622 SDNodeFlags Flags;
43623 Flags.setVectorReduction(true);
43624 return DAG.getNode(ISD::ADD, DL, VT, Concat, OtherOp, Flags);
43625 }
43627 static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
43628 const X86Subtarget &Subtarget) {
43629 if (!Subtarget.hasSSE2())
43630 return SDValue();
43632 SDLoc DL(N);
43633 EVT VT = N->getValueType(0);
43635 // TODO: There's nothing special about i32, any integer type above i16 should
43636 // work just as well.
43637 if (!VT.isVector() || !VT.isSimple() ||
43638 !(VT.getVectorElementType() == MVT::i32))
43639 return SDValue();
43641 unsigned RegSize = 128;
43642 if (Subtarget.useBWIRegs())
43643 RegSize = 512;
43644 else if (Subtarget.hasAVX())
43645 RegSize = 256;
43647 // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
43648 // TODO: We should be able to handle larger vectors by splitting them before
43649 // feeding them into several SADs, and then reducing over those.
43650 if (VT.getSizeInBits() / 4 > RegSize)
43651 return SDValue();
43653 // We know N is a reduction add. To match SAD, we need one of the operands to
43654 // be an ABS.
43655 SDValue AbsOp = N->getOperand(0);
43656 SDValue OtherOp = N->getOperand(1);
43657 if (AbsOp.getOpcode() != ISD::ABS)
43658 std::swap(AbsOp, OtherOp);
43659 if (AbsOp.getOpcode() != ISD::ABS)
43660 return SDValue();
43662 // Check whether we have an abs-diff pattern feeding into the select.
43663 SDValue SadOp0, SadOp1;
43664 if (!detectZextAbsDiff(AbsOp, SadOp0, SadOp1))
43665 return SDValue();
43667 // SAD pattern detected. Now build a SAD instruction and an addition for
43668 // reduction. Note that the number of elements of the result of SAD is less
43669 // than the number of elements of its input. Therefore, we can only update
43670 // part of the elements in the reduction vector.
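// PSADBW sums the absolute differences of the eight unsigned bytes in each
// 64-bit half of its inputs, producing a 16-bit total zero-extended into the
// corresponding i64 result element.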
43671 SDValue Sad = createPSADBW(DAG, SadOp0, SadOp1, DL, Subtarget);
43673 // The output of PSADBW is a vector of i64.
43674 // We need to turn the vector of i64 into a vector of i32.
43675 // If the reduction vector is at least as wide as the psadbw result, just
43676 // bitcast. If it's narrower, which can only occur for v2i32, bits 127:16 of
43677 // the PSADBW result will be zero. If we promote/narrow vectors, truncate the
43678 // v2i64 result to v2i32, which will be removed by type legalization. If we
43679 // widen narrow vectors, then we bitcast to v4i32 and extract v2i32.
43680 MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
43681 Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
43683 if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
43684 // Fill the upper elements with zero to match the add width.
43685 assert(VT.getSizeInBits() % ResVT.getSizeInBits() == 0 && "Unexpected VTs");
43686 unsigned NumConcats = VT.getSizeInBits() / ResVT.getSizeInBits();
43687 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, DL, ResVT));
43688 Ops[0] = Sad;
43689 Sad = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
43690 } else if (VT.getSizeInBits() < ResVT.getSizeInBits()) {
43691 Sad = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Sad,
43692 DAG.getIntPtrConstant(0, DL));
43693 }
43695 // Preserve the reduction flag on the ADD. We may need to revisit for the
43696 // other operand.
43697 SDNodeFlags Flags;
43698 Flags.setVectorReduction(true);
43699 return DAG.getNode(ISD::ADD, DL, VT, Sad, OtherOp, Flags);
43700 }
43702 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
43703 const SDLoc &DL, EVT VT,
43704 const X86Subtarget &Subtarget) {
43705 // Example of pattern we try to detect:
43706 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
43707 //(add (build_vector (extract_elt t, 0),
43708 // (extract_elt t, 2),
43709 // (extract_elt t, 4),
43710 // (extract_elt t, 6)),
43711 // (build_vector (extract_elt t, 1),
43712 // (extract_elt t, 3),
43713 // (extract_elt t, 5),
43714 // (extract_elt t, 7)))
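//
// This is exactly the PMADDWD operation: each i32 result element is
// x0[2i]*x1[2i] + x0[2i+1]*x1[2i+1] computed on the narrowed i16 inputs.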
43716 if (!Subtarget.hasSSE2())
43717 return SDValue();
43719 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
43720 Op1.getOpcode() != ISD::BUILD_VECTOR)
43721 return SDValue();
43723 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43724 VT.getVectorNumElements() < 4 ||
43725 !isPowerOf2_32(VT.getVectorNumElements()))
43726 return SDValue();
43728 // Check if one of Op0,Op1 is of the form:
43729 // (build_vector (extract_elt Mul, 0),
43730 // (extract_elt Mul, 2),
43731 // (extract_elt Mul, 4),
43732 // ...
43733 // the other is of the form:
43734 // (build_vector (extract_elt Mul, 1),
43735 // (extract_elt Mul, 3),
43736 // (extract_elt Mul, 5),
43737 // ...
43738 // and identify Mul.
43739 SDValue Mul;
43740 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
43741 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
43742 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
43743 // TODO: Be more tolerant to undefs.
43744 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43745 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43746 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43747 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43749 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
43750 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
43751 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
43752 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
43753 if (!Const0L || !Const1L || !Const0H || !Const1H)
43755 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
43756 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
43757 // Commutativity of mul allows factors of a product to reorder.
43758 if (Idx0L > Idx1L)
43759 std::swap(Idx0L, Idx1L);
43760 if (Idx0H > Idx1H)
43761 std::swap(Idx0H, Idx1H);
43762 // Commutativity of add allows pairs of factors to reorder.
43763 if (Idx0L > Idx0H) {
43764 std::swap(Idx0L, Idx0H);
43765 std::swap(Idx1L, Idx1H);
43767 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
43768 Idx1H != 2 * i + 3)
43769 return SDValue();
43770 if (!Mul) {
43771 // First time an extract_elt's source vector is visited. Must be a MUL
43772 // with 2X number of vector elements than the BUILD_VECTOR.
43773 // Both extracts must be from same MUL.
43774 Mul = Op0L->getOperand(0);
43775 if (Mul->getOpcode() != ISD::MUL ||
43776 Mul.getValueType().getVectorNumElements() != 2 * e)
43777 return SDValue();
43778 } else {
43779 // Check that the extract is from the same MUL previously seen.
43780 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
43781 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
43782 return SDValue();
43783 }
43784 }
43785 // Check if the Mul source can be safely shrunk.
43786 ShrinkMode Mode;
43787 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
43788 return SDValue();
43790 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43791 ArrayRef<SDValue> Ops) {
43792 // Shrink by adding truncate nodes and let DAGCombine fold with the
43793 // sources.
43794 EVT InVT = Ops[0].getValueType();
43795 assert(InVT.getScalarType() == MVT::i32 &&
43796 "Unexpected scalar element type");
43797 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
43798 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43799 InVT.getVectorNumElements() / 2);
43800 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43801 InVT.getVectorNumElements());
43802 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
43803 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
43804 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
43805 };
43806 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
43807 { Mul.getOperand(0), Mul.getOperand(1) },
43808 PMADDBuilder);
43809 }
43811 // Attempt to turn this pattern into PMADDWD.
43812 // (add (mul (sext (build_vector)), (sext (build_vector))),
43813 // (mul (sext (build_vector)), (sext (build_vector))))
43814 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
43815 const SDLoc &DL, EVT VT,
43816 const X86Subtarget &Subtarget) {
43817 if (!Subtarget.hasSSE2())
43818 return SDValue();
43820 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
43821 return SDValue();
43823 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43824 VT.getVectorNumElements() < 4 ||
43825 !isPowerOf2_32(VT.getVectorNumElements()))
43826 return SDValue();
43828 SDValue N00 = N0.getOperand(0);
43829 SDValue N01 = N0.getOperand(1);
43830 SDValue N10 = N1.getOperand(0);
43831 SDValue N11 = N1.getOperand(1);
43833 // All inputs need to be sign extends.
43834 // TODO: Support ZERO_EXTEND from known positive?
43835 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
43836 N01.getOpcode() != ISD::SIGN_EXTEND ||
43837 N10.getOpcode() != ISD::SIGN_EXTEND ||
43838 N11.getOpcode() != ISD::SIGN_EXTEND)
43839 return SDValue();
43841 // Peek through the extends.
43842 N00 = N00.getOperand(0);
43843 N01 = N01.getOperand(0);
43844 N10 = N10.getOperand(0);
43845 N11 = N11.getOperand(0);
43847 // Must be extending from vXi16.
43848 EVT InVT = N00.getValueType();
43849 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
43850 N10.getValueType() != InVT || N11.getValueType() != InVT)
43851 return SDValue();
43853 // All inputs should be build_vectors.
43854 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
43855 N01.getOpcode() != ISD::BUILD_VECTOR ||
43856 N10.getOpcode() != ISD::BUILD_VECTOR ||
43857 N11.getOpcode() != ISD::BUILD_VECTOR)
43858 return SDValue();
43860 // For each element, we need to ensure we have an odd element from one vector
43861 // multiplied by the odd element of another vector and the even element from
43862 // one of the same vectors being multiplied by the even element from the
43863 // other vector. So we need to make sure for each element i, this operator
43864 // is being performed:
43865 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
43866 SDValue In0, In1;
43867 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
43868 SDValue N00Elt = N00.getOperand(i);
43869 SDValue N01Elt = N01.getOperand(i);
43870 SDValue N10Elt = N10.getOperand(i);
43871 SDValue N11Elt = N11.getOperand(i);
43872 // TODO: Be more tolerant to undefs.
43873 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43874 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43875 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43876 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43877 return SDValue();
43878 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
43879 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
43880 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
43881 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
43882 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
43883 return SDValue();
43884 unsigned IdxN00 = ConstN00Elt->getZExtValue();
43885 unsigned IdxN01 = ConstN01Elt->getZExtValue();
43886 unsigned IdxN10 = ConstN10Elt->getZExtValue();
43887 unsigned IdxN11 = ConstN11Elt->getZExtValue();
43888 // Add is commutative so indices can be reordered.
43889 if (IdxN00 > IdxN10) {
43890 std::swap(IdxN00, IdxN10);
43891 std::swap(IdxN01, IdxN11);
43893 // N0 indices must be the even elements. N1 indices must be the next odd elements.
43894 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
43895 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
43896 return SDValue();
43897 SDValue N00In = N00Elt.getOperand(0);
43898 SDValue N01In = N01Elt.getOperand(0);
43899 SDValue N10In = N10Elt.getOperand(0);
43900 SDValue N11In = N11Elt.getOperand(0);
43901 // First time we find an input capture it.
43902 if (!In0) {
43903 In0 = N00In;
43904 In1 = N01In;
43905 }
43906 // Mul is commutative so the input vectors can be in any order.
43907 // Canonicalize to make the compares easier.
43908 if (In0 != N00In)
43909 std::swap(N00In, N01In);
43910 if (In0 != N10In)
43911 std::swap(N10In, N11In);
43912 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
43913 return SDValue();
43914 }
43916 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43917 ArrayRef<SDValue> Ops) {
43918 // Shrink by adding truncate nodes and let DAGCombine fold with the
43919 // sources.
43920 EVT OpVT = Ops[0].getValueType();
43921 assert(OpVT.getScalarType() == MVT::i16 &&
43922 "Unexpected scalar element type");
43923 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
43924 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43925 OpVT.getVectorNumElements() / 2);
43926 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
43927 };
43928 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
43929 PMADDBuilder);
43930 }
43932 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
43933 TargetLowering::DAGCombinerInfo &DCI,
43934 const X86Subtarget &Subtarget) {
43935 const SDNodeFlags Flags = N->getFlags();
43936 if (Flags.hasVectorReduction()) {
43937 if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
43938 return Sad;
43939 if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
43940 return MAdd;
43941 }
43942 EVT VT = N->getValueType(0);
43943 SDValue Op0 = N->getOperand(0);
43944 SDValue Op1 = N->getOperand(1);
43946 if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43947 return MAdd;
43948 if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43949 return MAdd;
43951 // Try to synthesize horizontal adds from adds of shuffles.
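// e.g. (add (shuffle X, Y, <0,2,4,6>), (shuffle X, Y, <1,3,5,7>)) adds each
// pair of adjacent elements, which is what PHADDW/PHADDD compute.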
43952 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
43953 VT == MVT::v8i32) &&
43954 Subtarget.hasSSSE3() &&
43955 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
43956 auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43957 ArrayRef<SDValue> Ops) {
43958 return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
43959 };
43960 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
43961 HADDBuilder);
43962 }
43964 // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
43965 // (sub Y, (sext (vXi1 X))).
43966 // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
43967 // generic DAG combine without a legal type check, but adding this there
43968 // caused regressions.
43969 if (VT.isVector()) {
43970 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43971 if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
43972 Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
43973 TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
43974 SDLoc DL(N);
43975 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
43976 return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
43977 }
43979 if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
43980 Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
43981 TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
43982 SDLoc DL(N);
43983 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
43984 return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
43985 }
43986 }
43988 return combineAddOrSubToADCOrSBB(N, DAG);
43989 }
43991 static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
43992 const X86Subtarget &Subtarget) {
43993 SDValue Op0 = N->getOperand(0);
43994 SDValue Op1 = N->getOperand(1);
43995 EVT VT = N->getValueType(0);
43997 if (!VT.isVector())
43998 return SDValue();
44000 // PSUBUS is supported, starting from SSE2, but truncation for v8i32
44001 // is only worth it with SSSE3 (PSHUFB).
44002 EVT EltVT = VT.getVectorElementType();
44003 if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
44004 !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
44005 !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
44006 return SDValue();
44008 SDValue SubusLHS, SubusRHS;
44009 // Try to find umax(a,b) - b or a - umin(a,b) patterns
44010 // they may be converted to subus(a,b).
44011 // TODO: Need to add IR canonicalization for this code.
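// e.g. (sub (umax A, B), B) can never wrap below zero, so it is exactly
// (usubsat A, B); likewise for (sub A, (umin A, B)).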
44012 if (Op0.getOpcode() == ISD::UMAX) {
44013 SubusRHS = Op1;
44014 SDValue MaxLHS = Op0.getOperand(0);
44015 SDValue MaxRHS = Op0.getOperand(1);
44016 if (MaxLHS == Op1)
44017 SubusLHS = MaxRHS;
44018 else if (MaxRHS == Op1)
44019 SubusLHS = MaxLHS;
44020 else
44021 return SDValue();
44022 } else if (Op1.getOpcode() == ISD::UMIN) {
44023 SubusLHS = Op0;
44024 SDValue MinLHS = Op1.getOperand(0);
44025 SDValue MinRHS = Op1.getOperand(1);
44026 if (MinLHS == Op0)
44027 SubusRHS = MinRHS;
44028 else if (MinRHS == Op0)
44029 SubusRHS = MinLHS;
44030 else
44031 return SDValue();
44032 } else
44033 return SDValue();
44035 // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
44036 // special preprocessing in some cases.
44037 if (EltVT == MVT::i8 || EltVT == MVT::i16)
44038 return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
44040 assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
44041 "Unexpected VT!");
44043 // Special preprocessing case can be only applied
44044 // if the value was zero extended from 16 bit,
44045 // so we require first 16 bits to be zeros for 32 bit
44046 // values, or first 48 bits for 64 bit values.
44047 KnownBits Known = DAG.computeKnownBits(SubusLHS);
44048 unsigned NumZeros = Known.countMinLeadingZeros();
44049 if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
44050 return SDValue();
44052 EVT ExtType = SubusLHS.getValueType();
44053 EVT ShrinkedType;
44054 if (VT == MVT::v8i32 || VT == MVT::v8i64)
44055 ShrinkedType = MVT::v8i16;
44056 else
44057 ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
44059 // If SubusLHS is zero-extended, truncate SubusRHS to the same narrow size:
44060 // SubusRHS = umin(0xFFF..., SubusRHS).
44061 SDValue SaturationConst =
44062 DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
44063 ShrinkedType.getScalarSizeInBits()),
44064 SDLoc(SubusLHS), ExtType);
44065 SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
44066 SaturationConst);
44067 SDValue NewSubusLHS =
44068 DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
44069 SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
44070 SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
44071 NewSubusLHS, NewSubusRHS);
44073 // Zero-extend the result; it may be used somewhere as a 32-bit value. If
44074 // not, the zext and the following trunc will be shrunk away by later combines.
44075 return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
44076 }
44078 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
44079 TargetLowering::DAGCombinerInfo &DCI,
44080 const X86Subtarget &Subtarget) {
44081 SDValue Op0 = N->getOperand(0);
44082 SDValue Op1 = N->getOperand(1);
44084 // X86 can't encode an immediate LHS of a sub. See if we can push the
44085 // negation into a preceding instruction.
44086 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
44087 // If the RHS of the sub is a XOR with one use and a constant, invert the
44088 // immediate. Then add one to the LHS of the sub so we can turn
44089 // X-Y -> X+~Y+1, saving one register.
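// e.g. (sub 5, (xor X, 3)) --> (add (xor X, ~3), 6), using the identity
// -(X ^ C) == (X ^ ~C) + 1.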
44090 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
44091 isa<ConstantSDNode>(Op1.getOperand(1))) {
44092 const APInt &XorC = Op1.getConstantOperandAPInt(1);
44093 EVT VT = Op0.getValueType();
44094 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
44095 Op1.getOperand(0),
44096 DAG.getConstant(~XorC, SDLoc(Op1), VT));
44097 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
44098 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
44099 }
44100 }
44102 // Try to synthesize horizontal subs from subs of shuffles.
44103 EVT VT = N->getValueType(0);
44104 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
44105 VT == MVT::v8i32) &&
44106 Subtarget.hasSSSE3() &&
44107 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
44108 auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44109 ArrayRef<SDValue> Ops) {
44110 return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
44111 };
44112 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
44113 HSUBBuilder);
44114 }
44116 // Try to create PSUBUS if SUB's argument is max/min
44117 if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
44118 return V;
44120 return combineAddOrSubToADCOrSBB(N, DAG);
44121 }
44123 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
44124 const X86Subtarget &Subtarget) {
44125 MVT VT = N->getSimpleValueType(0);
44126 SDLoc DL(N);
44128 if (N->getOperand(0) == N->getOperand(1)) {
44129 if (N->getOpcode() == X86ISD::PCMPEQ)
44130 return DAG.getConstant(-1, DL, VT);
44131 if (N->getOpcode() == X86ISD::PCMPGT)
44132 return DAG.getConstant(0, DL, VT);
44133 }
44135 return SDValue();
44136 }
44138 /// Helper that combines an array of subvector ops as if they were the operands
44139 /// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
44140 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
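/// For example, (insert_subvector (insert_subvector undef, X, 0), Y, 4) with
/// v4i32 subvectors is handled as if it were (concat_vectors X, Y).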
44141 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
44142 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
44143 TargetLowering::DAGCombinerInfo &DCI,
44144 const X86Subtarget &Subtarget) {
44145 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
44147 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
44148 return DAG.getUNDEF(VT);
44150 if (llvm::all_of(Ops, [](SDValue Op) {
44151 return ISD::isBuildVectorAllZeros(Op.getNode());
44152 }))
44153 return getZeroVector(VT, Subtarget, DAG, DL);
44155 SDValue Op0 = Ops[0];
44157 // Fold subvector loads into one.
44158 // If needed, look through bitcasts to get to the load.
44159 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
44160 bool Fast;
44161 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
44162 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
44163 *FirstLd->getMemOperand(), &Fast) &&
44164 Fast) {
44165 if (SDValue Ld =
44166 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
44167 return Ld;
44168 }
44169 }
44171 // Repeated subvectors.
44172 if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
44173 // If this broadcast/subv_broadcast is inserted into both halves, use a
44174 // larger broadcast/subv_broadcast.
44175 if (Op0.getOpcode() == X86ISD::VBROADCAST ||
44176 Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
44177 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
44179 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
44180 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
44181 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
44182 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
44183 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
44184 Op0.getOperand(0),
44185 DAG.getIntPtrConstant(0, DL)));
44187 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
44188 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
44189 (Subtarget.hasAVX2() ||
44190 (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
44191 Op0.getOperand(0).getValueType() == VT.getScalarType())
44192 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
44193 }
44195 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
44197 // Repeated opcode.
44198 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
44199 // but it currently struggles with different vector widths.
44200 if (llvm::all_of(Ops, [Op0](SDValue Op) {
44201 return Op.getOpcode() == Op0.getOpcode();
44203 unsigned NumOps = Ops.size();
44204 switch (Op0.getOpcode()) {
44205 case X86ISD::PSHUFHW:
44206 case X86ISD::PSHUFLW:
44207 case X86ISD::PSHUFD:
44208 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
44209 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
44210 SmallVector<SDValue, 2> Src;
44211 for (unsigned i = 0; i != NumOps; ++i)
44212 Src.push_back(Ops[i].getOperand(0));
44213 return DAG.getNode(Op0.getOpcode(), DL, VT,
44214 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
44215 Op0.getOperand(1));
44216 }
44217 LLVM_FALLTHROUGH;
44218 case X86ISD::VPERMILPI:
44219 // TODO - add support for vXf64/vXi64 shuffles.
44220 if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
44221 Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
44222 SmallVector<SDValue, 2> Src;
44223 for (unsigned i = 0; i != NumOps; ++i)
44224 Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
44225 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
44226 Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
44227 Op0.getOperand(1));
44228 return DAG.getBitcast(VT, Res);
44229 }
44230 break;
44231 case X86ISD::PACKUS:
44232 if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
44233 SmallVector<SDValue, 2> LHS, RHS;
44234 for (unsigned i = 0; i != NumOps; ++i) {
44235 LHS.push_back(Ops[i].getOperand(0));
44236 RHS.push_back(Ops[i].getOperand(1));
44238 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
44239 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
44240 NumOps * SrcVT.getVectorNumElements());
44241 return DAG.getNode(Op0.getOpcode(), DL, VT,
44242 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
44243 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
44244 }
44245 break;
44246 }
44247 }
44249 return SDValue();
44250 }
44252 static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
44253 TargetLowering::DAGCombinerInfo &DCI,
44254 const X86Subtarget &Subtarget) {
44255 EVT VT = N->getValueType(0);
44256 EVT SrcVT = N->getOperand(0).getValueType();
44257 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44259 // Don't do anything for i1 vectors.
44260 if (VT.getVectorElementType() == MVT::i1)
44261 return SDValue();
44263 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
44264 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
44265 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
44266 DCI, Subtarget))
44267 return R;
44268 }
44270 return SDValue();
44271 }
44273 static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
44274 TargetLowering::DAGCombinerInfo &DCI,
44275 const X86Subtarget &Subtarget) {
44276 if (DCI.isBeforeLegalizeOps())
44277 return SDValue();
44279 MVT OpVT = N->getSimpleValueType(0);
44281 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
44283 SDLoc dl(N);
44284 SDValue Vec = N->getOperand(0);
44285 SDValue SubVec = N->getOperand(1);
44287 uint64_t IdxVal = N->getConstantOperandVal(2);
44288 MVT SubVecVT = SubVec.getSimpleValueType();
44290 if (Vec.isUndef() && SubVec.isUndef())
44291 return DAG.getUNDEF(OpVT);
44293 // Inserting undefs/zeros into zeros/undefs is a zero vector.
44294 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
44295 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
44296 return getZeroVector(OpVT, Subtarget, DAG, dl);
44298 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
44299 // If we're inserting into a zero vector and then into a larger zero vector,
44300 // just insert into the larger zero vector directly.
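// e.g. insert (v16i32 zeros), (insert (v8i32 zeros), (v4i32 X), 4), 8
// --> insert (v16i32 zeros), X, 12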
44301 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
44302 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
44303 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
44304 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
44305 getZeroVector(OpVT, Subtarget, DAG, dl),
44306 SubVec.getOperand(1),
44307 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
44310 // If we're inserting into a zero vector and our input was extracted from an
44311 // insert into a zero vector of the same type and the extraction was at
44312 // least as large as the original insertion. Just insert the original
44313 // subvector into a zero vector.
44314 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
44315 isNullConstant(SubVec.getOperand(1)) &&
44316 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
44317 SDValue Ins = SubVec.getOperand(0);
44318 if (isNullConstant(Ins.getOperand(2)) &&
44319 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
44320 Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
44321 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
44322 getZeroVector(OpVT, Subtarget, DAG, dl),
44323 Ins.getOperand(1), N->getOperand(2));
44324 }
44325 }
44327 // Stop here if this is an i1 vector.
44328 if (IsI1Vector)
44329 return SDValue();
44331 // If this is an insert of an extract, combine to a shuffle. Don't do this
44332 // if the insert or extract can be represented with a subregister operation.
44333 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
44334 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
44335 (IdxVal != 0 || !Vec.isUndef())) {
44336 int ExtIdxVal = SubVec.getConstantOperandVal(1);
44337 if (ExtIdxVal != 0) {
44338 int VecNumElts = OpVT.getVectorNumElements();
44339 int SubVecNumElts = SubVecVT.getVectorNumElements();
44340 SmallVector<int, 64> Mask(VecNumElts);
44341 // First create an identity shuffle mask.
44342 for (int i = 0; i != VecNumElts; ++i)
44343 Mask[i] = i;
44344 // Now insert the extracted portion.
44345 for (int i = 0; i != SubVecNumElts; ++i)
44346 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
44348 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
44349 }
44350 }
44352 // Match concat_vector style patterns.
44353 SmallVector<SDValue, 2> SubVectorOps;
44354 if (collectConcatOps(N, SubVectorOps)) {
44355 if (SDValue Fold =
44356 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
44357 return Fold;
44359 // If we're inserting all zeros into the upper half, change this to
44360 // a concat with zero. We will match this to a move
44361 // with implicit upper bit zeroing during isel.
44362 // We do this here because we don't want combineConcatVectorOps to
44363 // create INSERT_SUBVECTOR from CONCAT_VECTORS.
44364 if (SubVectorOps.size() == 2 &&
44365 ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
44366 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
44367 getZeroVector(OpVT, Subtarget, DAG, dl),
44368 SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
44369 }
44371 // If this is a broadcast insert into an upper undef, use a larger broadcast.
44372 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
44373 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
44375 // If this is a broadcast load inserted into an upper undef, use a larger
44376 // broadcast load.
44377 if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
44378 SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
44379 auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
44380 SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
44381 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
44382 SDValue BcastLd =
44383 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
44384 MemIntr->getMemoryVT(),
44385 MemIntr->getMemOperand());
44386 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
44387 return BcastLd;
44388 }
44390 return SDValue();
44391 }
44393 /// If we are extracting a subvector of a vector select and the select condition
44394 /// is composed of concatenated vectors, try to narrow the select width. This
44395 /// is a common pattern for AVX1 integer code because 256-bit selects may be
44396 /// legal, but there is almost no integer math/logic available for 256-bit.
44397 /// This function should only be called with legal types (otherwise, the calls
44398 /// to get simple value types will assert).
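/// For example, extracting the upper v4i32 half of
/// (v8i32 vselect (v8i32 C), T, F) becomes
/// (v4i32 vselect (extract C, 4), (extract T, 4), (extract F, 4)).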
44399 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
44400 SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
44401 SmallVector<SDValue, 4> CatOps;
44402 if (Sel.getOpcode() != ISD::VSELECT ||
44403 !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
44404 return SDValue();
44406 // Note: We assume simple value types because this should only be called with
44407 // legal operations/types.
44408 // TODO: This can be extended to handle extraction to 256-bits.
44409 MVT VT = Ext->getSimpleValueType(0);
44410 if (!VT.is128BitVector())
44411 return SDValue();
44413 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
44414 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
44415 return SDValue();
44417 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
44418 MVT SelVT = Sel.getSimpleValueType();
44419 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
44420 "Unexpected vector type with legal operations");
44422 unsigned SelElts = SelVT.getVectorNumElements();
44423 unsigned CastedElts = WideVT.getVectorNumElements();
44424 unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
44425 if (SelElts % CastedElts == 0) {
44426 // The select has the same or more (narrower) elements than the extract
44427 // operand. The extraction index gets scaled by that factor.
44428 ExtIdx *= (SelElts / CastedElts);
44429 } else if (CastedElts % SelElts == 0) {
44430 // The select has less (wider) elements than the extract operand. Make sure
44431 // that the extraction index can be divided evenly.
44432 unsigned IndexDivisor = CastedElts / SelElts;
44433 if (ExtIdx % IndexDivisor != 0)
44435 ExtIdx /= IndexDivisor;
44436 } else {
44437 llvm_unreachable("Element count of simple vector types are not divisible?");
44438 }
44440 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
44441 unsigned NarrowElts = SelElts / NarrowingFactor;
44442 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
44443 SDLoc DL(Ext);
44444 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
44445 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
44446 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
44447 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
44448 return DAG.getBitcast(VT, NarrowSel);
44449 }
44451 static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
44452 TargetLowering::DAGCombinerInfo &DCI,
44453 const X86Subtarget &Subtarget) {
44454 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
44455 // eventually get combined/lowered into ANDNP) with a concatenated operand,
44456 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
44457 // We let generic combining take over from there to simplify the
44458 // insert/extract and 'not'.
44459 // This pattern emerges during AVX1 legalization. We handle it before lowering
44460 // to avoid complications like splitting constant vector loads.
44462 // Capture the original wide type in the likely case that we need to bitcast
44463 // back to this type.
44464 if (!N->getValueType(0).isSimple())
44465 return SDValue();
44467 MVT VT = N->getSimpleValueType(0);
44468 SDValue InVec = N->getOperand(0);
44469 SDValue InVecBC = peekThroughBitcasts(InVec);
44470 EVT InVecVT = InVec.getValueType();
44471 EVT InVecBCVT = InVecBC.getValueType();
44472 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44474 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
44475 TLI.isTypeLegal(InVecVT) &&
44476 InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
44477 auto isConcatenatedNot = [] (SDValue V) {
44478 V = peekThroughBitcasts(V);
44479 if (!isBitwiseNot(V))
44480 return false;
44481 SDValue NotOp = V->getOperand(0);
44482 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
44483 };
44484 if (isConcatenatedNot(InVecBC.getOperand(0)) ||
44485 isConcatenatedNot(InVecBC.getOperand(1))) {
44486 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
44487 SDValue Concat = split256IntArith(InVecBC, DAG);
44488 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
44489 DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
44490 }
44491 }
44493 if (DCI.isBeforeLegalizeOps())
44494 return SDValue();
44496 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
44497 return V;
44499 unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
44501 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
44502 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
44504 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
44505 if (VT.getScalarType() == MVT::i1)
44506 return DAG.getConstant(1, SDLoc(N), VT);
44507 return getOnesVector(VT, DAG, SDLoc(N));
44508 }
44510 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
44511 return DAG.getBuildVector(
44512 VT, SDLoc(N),
44513 InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
44515 // Try to move vector bitcast after extract_subv by scaling extraction index:
44516 // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
44517 // TODO: Move this to DAGCombiner::visitEXTRACT_SUBVECTOR
44518 if (InVec != InVecBC && InVecBCVT.isVector()) {
44519 unsigned SrcNumElts = InVecBCVT.getVectorNumElements();
44520 unsigned DestNumElts = InVecVT.getVectorNumElements();
44521 if ((DestNumElts % SrcNumElts) == 0) {
44522 unsigned DestSrcRatio = DestNumElts / SrcNumElts;
44523 if ((VT.getVectorNumElements() % DestSrcRatio) == 0) {
44524 unsigned NewExtNumElts = VT.getVectorNumElements() / DestSrcRatio;
44525 EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(),
44526 InVecBCVT.getScalarType(), NewExtNumElts);
44527 if ((N->getConstantOperandVal(1) % DestSrcRatio) == 0 &&
44528 TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
44529 unsigned IndexValScaled = N->getConstantOperandVal(1) / DestSrcRatio;
44530 SDLoc DL(N);
44531 SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
44532 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
44533 InVecBC, NewIndex);
44534 return DAG.getBitcast(VT, NewExtract);
44535 }
44536 }
44537 }
44538 }
44540 // If we are extracting from an insert into a zero vector, replace with a
44541 // smaller insert into zero if we don't access less than the original
44542 // subvector. Don't do this for i1 vectors.
44543 if (VT.getVectorElementType() != MVT::i1 &&
44544 InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
44545 InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
44546 ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
44547 InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
44548 SDLoc DL(N);
44549 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
44550 getZeroVector(VT, Subtarget, DAG, DL),
44551 InVec.getOperand(1), InVec.getOperand(2));
44552 }
44554 // If we're extracting from a broadcast then we're better off just
44555 // broadcasting to the smaller type directly, assuming this is the only use.
44556 // As it's a broadcast we don't care about the extraction index.
44557 if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
44558 InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
44559 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
44561 if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
44562 auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
44563 if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
44564 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
44565 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
44566 SDValue BcastLd =
44567 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
44568 MemIntr->getMemoryVT(),
44569 MemIntr->getMemOperand());
44570 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
44571 return BcastLd;
44572 }
44573 }
44575 // If we're extracting the lowest subvector and we're the only user,
44576 // we may be able to perform this with a smaller vector width.
44577 if (IdxVal == 0 && InVec.hasOneUse()) {
44578 unsigned InOpcode = InVec.getOpcode();
44579 if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
44580 // v2f64 CVTDQ2PD(v4i32).
44581 if (InOpcode == ISD::SINT_TO_FP &&
44582 InVec.getOperand(0).getValueType() == MVT::v4i32) {
44583 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
44584 }
44585 // v2f64 CVTUDQ2PD(v4i32).
44586 if (InOpcode == ISD::UINT_TO_FP &&
44587 InVec.getOperand(0).getValueType() == MVT::v4i32) {
44588 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
44589 }
44590 // v2f64 CVTPS2PD(v4f32).
44591 if (InOpcode == ISD::FP_EXTEND &&
44592 InVec.getOperand(0).getValueType() == MVT::v4f32) {
44593 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
44594 }
44595 }
44596 if ((InOpcode == ISD::ANY_EXTEND ||
44597 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
44598 InOpcode == ISD::ZERO_EXTEND ||
44599 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
44600 InOpcode == ISD::SIGN_EXTEND ||
44601 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44602 VT.is128BitVector() &&
44603 InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
44604 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
44605 return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
44606 }
44607 if (InOpcode == ISD::VSELECT &&
44608 InVec.getOperand(0).getValueType().is256BitVector() &&
44609 InVec.getOperand(1).getValueType().is256BitVector() &&
44610 InVec.getOperand(2).getValueType().is256BitVector()) {
44611 SDLoc DL(N);
44612 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
44613 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
44614 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
44615 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
44616 }
44617 }
44619 return SDValue();
44620 }
44622 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
44623 EVT VT = N->getValueType(0);
44624 SDValue Src = N->getOperand(0);
44626 SDLoc DL(N);
44627 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
44628 // This occurs frequently in our masked scalar intrinsic code and our
44629 // floating point select lowering with AVX512.
44630 // TODO: SimplifyDemandedBits instead?
44631 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
44632 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
44633 if (C->getAPIntValue().isOneValue())
44634 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
44635 Src.getOperand(0));
44637 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
44638 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
44639 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
44640 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
44641 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
44642 if (C->isNullValue())
44643 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
44644 Src.getOperand(1));
44646 // Reduce v2i64 to v4i32 if we don't need the upper bits.
44647 // TODO: Move to DAGCombine?
44648 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
44649 Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
44650 Src.getOperand(0).getScalarValueSizeInBits() <= 32)
44651 return DAG.getBitcast(
44652 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
44653 DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));
44655 return SDValue();
44656 }
44658 // Simplify PMULDQ and PMULUDQ operations.
44659 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
44660 TargetLowering::DAGCombinerInfo &DCI,
44661 const X86Subtarget &Subtarget) {
44662 SDValue LHS = N->getOperand(0);
44663 SDValue RHS = N->getOperand(1);
44665 // Canonicalize constant to RHS.
44666 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
44667 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
44668 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
44670 // Multiply by zero.
44671 // Don't return RHS as it may contain UNDEFs.
44672 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
44673 return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
44675 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
44676 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44677 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
44678 return SDValue(N, 0);
44680 // If the input is an extend_invec and the SimplifyDemandedBits call didn't
44681 // convert it to any_extend_invec, due to the LegalOperations check, do the
44682 // conversion directly to a vector shuffle manually. This exposes combine
44683 // opportunities missed by combineExtInVec not calling
44684 // combineX86ShufflesRecursively on SSE4.1 targets.
44685 // FIXME: This is basically a hack around several other issues related to
44686 // ANY_EXTEND_VECTOR_INREG.
44687 if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
44688 (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
44689 LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44690 LHS.getOperand(0).getValueType() == MVT::v4i32) {
44691 SDLoc dl(N);
44692 LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
44693 LHS.getOperand(0), { 0, -1, 1, -1 });
44694 LHS = DAG.getBitcast(MVT::v2i64, LHS);
44695 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
44696 }
44697 if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
44698 (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
44699 RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44700 RHS.getOperand(0).getValueType() == MVT::v4i32) {
44701 SDLoc dl(N);
44702 RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
44703 RHS.getOperand(0), { 0, -1, 1, -1 });
44704 RHS = DAG.getBitcast(MVT::v2i64, RHS);
44705 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
44706 }
44708 return SDValue();
44709 }
44711 static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
44712 TargetLowering::DAGCombinerInfo &DCI,
44713 const X86Subtarget &Subtarget) {
44714 EVT VT = N->getValueType(0);
44715 SDValue In = N->getOperand(0);
44716 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44718 // Try to merge vector loads and extend_inreg to an extload.
44719 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
44720 In.hasOneUse()) {
44721 auto *Ld = cast<LoadSDNode>(In);
44722 if (Ld->isSimple()) {
44723 MVT SVT = In.getSimpleValueType().getVectorElementType();
44724 ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
44725 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
44726 VT.getVectorNumElements());
44727 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
44728 SDValue Load =
44729 DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
44730 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
44731 Ld->getMemOperand()->getFlags());
44732 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
44733 return Load;
44734 }
44735 }
44736 }
44738 // Attempt to combine as a shuffle.
44739 // TODO: SSE41 support
44740 if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
44741 SDValue Op(N, 0);
44742 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
44743 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
44744 return Res;
44745 }
44747 return SDValue();
44748 }
44750 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
44751 TargetLowering::DAGCombinerInfo &DCI) {
44752 EVT VT = N->getValueType(0);
44754 APInt KnownUndef, KnownZero;
44755 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44756 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
44757 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
44758 KnownZero, DCI))
44759 return SDValue(N, 0);
44761 return SDValue();
44762 }
44764 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
44765 DAGCombinerInfo &DCI) const {
44766 SelectionDAG &DAG = DCI.DAG;
44767 switch (N->getOpcode()) {
44768 default: break;
44769 case ISD::SCALAR_TO_VECTOR:
44770 return combineScalarToVector(N, DAG);
44771 case ISD::EXTRACT_VECTOR_ELT:
44772 case X86ISD::PEXTRW:
44773 case X86ISD::PEXTRB:
44774 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
44775 case ISD::CONCAT_VECTORS:
44776 return combineConcatVectors(N, DAG, DCI, Subtarget);
44777 case ISD::INSERT_SUBVECTOR:
44778 return combineInsertSubvector(N, DAG, DCI, Subtarget);
44779 case ISD::EXTRACT_SUBVECTOR:
44780 return combineExtractSubvector(N, DAG, DCI, Subtarget);
44781 case ISD::VSELECT:
44782 case ISD::SELECT:
44783 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
44784 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
44785 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
44786 case X86ISD::CMP: return combineCMP(N, DAG);
44787 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
44788 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
44789 case X86ISD::ADD:
44790 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
44791 case X86ISD::SBB: return combineSBB(N, DAG);
44792 case X86ISD::ADC: return combineADC(N, DAG, DCI);
44793 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
44794 case ISD::SHL: return combineShiftLeft(N, DAG);
44795 case ISD::SRA: return combineShiftRightArithmetic(N, DAG);
44796 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI);
44797 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
44798 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
44799 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
44800 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
44801 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
44802 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
44803 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
44804 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
44805 case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, DCI, Subtarget);
44806 case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
44807 case ISD::FADD:
44808 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
44809 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
44810 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
44811 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG);
44812 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
44813 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
44814 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
44815 case X86ISD::FXOR:
44816 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
44817 case X86ISD::FMIN:
44818 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
44819 case ISD::FMINNUM:
44820 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
44821 case X86ISD::CVTSI2P:
44822 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
44823 case X86ISD::CVTP2SI:
44824 case X86ISD::CVTP2UI:
44825 case X86ISD::CVTTP2SI:
44826 case X86ISD::CVTTP2UI: return combineCVTP2I_CVTTP2I(N, DAG, DCI);
44827 case X86ISD::BT: return combineBT(N, DAG, DCI);
44828 case ISD::ANY_EXTEND:
44829 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
44830 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
44831 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
44832 case ISD::ANY_EXTEND_VECTOR_INREG:
44833 case ISD::SIGN_EXTEND_VECTOR_INREG:
44834 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
44835 Subtarget);
44836 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
44837 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
44838 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
44839 case X86ISD::PACKSS:
44840 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
44841 case X86ISD::VSHL:
44842 case X86ISD::VSRA:
44843 case X86ISD::VSRL:
44844 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
44845 case X86ISD::VSHLI:
44846 case X86ISD::VSRAI:
44847 case X86ISD::VSRLI:
44848 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
44849 case X86ISD::PINSRB:
44850 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
44851 case X86ISD::SHUFP: // Handle all target specific shuffles
44852 case X86ISD::INSERTPS:
44853 case X86ISD::EXTRQI:
44854 case X86ISD::INSERTQI:
44855 case X86ISD::PALIGNR:
44856 case X86ISD::VSHLDQ:
44857 case X86ISD::VSRLDQ:
44858 case X86ISD::BLENDI:
44859 case X86ISD::UNPCKH:
44860 case X86ISD::UNPCKL:
44861 case X86ISD::MOVHLPS:
44862 case X86ISD::MOVLHPS:
44863 case X86ISD::PSHUFB:
44864 case X86ISD::PSHUFD:
44865 case X86ISD::PSHUFHW:
44866 case X86ISD::PSHUFLW:
44867 case X86ISD::MOVSHDUP:
44868 case X86ISD::MOVSLDUP:
44869 case X86ISD::MOVDDUP:
44870 case X86ISD::MOVSS:
44871 case X86ISD::MOVSD:
44872 case X86ISD::VBROADCAST:
44873 case X86ISD::VPPERM:
44874 case X86ISD::VPERMI:
44875 case X86ISD::VPERMV:
44876 case X86ISD::VPERMV3:
44877 case X86ISD::VPERMIL2:
44878 case X86ISD::VPERMILPI:
44879 case X86ISD::VPERMILPV:
44880 case X86ISD::VPERM2X128:
44881 case X86ISD::SHUF128:
44882 case X86ISD::VZEXT_MOVL:
44883 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
44884 case X86ISD::FMADD_RND:
44885 case X86ISD::FMSUB:
44886 case X86ISD::FMSUB_RND:
44887 case X86ISD::FNMADD:
44888 case X86ISD::FNMADD_RND:
44889 case X86ISD::FNMSUB:
44890 case X86ISD::FNMSUB_RND:
44891 case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
44892 case X86ISD::FMADDSUB_RND:
44893 case X86ISD::FMSUBADD_RND:
44894 case X86ISD::FMADDSUB:
44895 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
44896 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
44897 case X86ISD::MGATHER:
44898 case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
44899 case ISD::MGATHER:
44900 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
44901 case X86ISD::PCMPEQ:
44902 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
44903 case X86ISD::PMULDQ:
44904 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
44905 case X86ISD::KSHIFTL:
44906 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
44907 }
44909 return SDValue();
44910 }
44912 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
44913 if (!isTypeLegal(VT))
44914 return false;
44916 // There are no vXi8 shifts.
44917 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
44918 return false;
44920 // TODO: Almost no 8-bit ops are desirable because they have no actual
44921 // size/speed advantages vs. 32-bit ops, but they do have a major
44922 // potential disadvantage by causing partial register stalls.
44924 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
44925 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
44926 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
44927 // check for a constant operand to the multiply.
44928 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
44929 return false;
44931 // i16 instruction encodings are longer and some i16 instructions are slow,
44932 // so those are not desirable.
44933 if (VT == MVT::i16) {
44934 switch (Opc) {
44935 default:
44936 return true;
44937 case ISD::LOAD:
44938 case ISD::SIGN_EXTEND:
44939 case ISD::ZERO_EXTEND:
44940 case ISD::ANY_EXTEND:
44941 case ISD::SHL:
44942 case ISD::SRA:
44943 case ISD::SRL:
44944 case ISD::SUB:
44945 case ISD::ADD:
44946 case ISD::MUL:
44947 case ISD::AND:
44948 case ISD::OR:
44949 case ISD::XOR:
44950 return false;
44951 }
44952 }
44954 // Any legal type not explicitly accounted for above here is desirable.
44955 return true;
44956 }
44958 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
44959 SDValue Value, SDValue Addr,
44960 SelectionDAG &DAG) const {
44961 const Module *M = DAG.getMachineFunction().getMMI().getModule();
44962 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
44963 if (IsCFProtectionSupported) {
44964 // When control-flow branch protection is enabled, we need to add the
44965 // notrack prefix to the indirect branch.
44966 // In order to do that we create the NT_BRIND SDNode.
44967 // Upon ISEL, the pattern will convert it to a jmp with the NoTrack prefix.
44968 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
44969 }
44971 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
44972 }
44974 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
44975 EVT VT = Op.getValueType();
44976 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
44977 isa<ConstantSDNode>(Op.getOperand(1));
44979 // i16 is legal, but undesirable since i16 instruction encodings are longer
44980 // and some i16 instructions are slow.
44981 // 8-bit multiply-by-constant can usually be expanded to something cheaper
44982 // using LEA and/or other ALU ops.
44983 if (VT != MVT::i16 && !Is8BitMulByConstant)
44984 return false;
44986 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
44987 if (!Op.hasOneUse())
44988 return false;
44989 SDNode *User = *Op->use_begin();
44990 if (!ISD::isNormalStore(User))
44991 return false;
44992 auto *Ld = cast<LoadSDNode>(Load);
44993 auto *St = cast<StoreSDNode>(User);
44994 return Ld->getBasePtr() == St->getBasePtr();
44995 };
44997 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
44998 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
44999 return false;
45000 if (!Op.hasOneUse())
45001 return false;
45002 SDNode *User = *Op->use_begin();
45003 if (User->getOpcode() != ISD::ATOMIC_STORE)
45004 return false;
45005 auto *Ld = cast<AtomicSDNode>(Load);
45006 auto *St = cast<AtomicSDNode>(User);
45007 return Ld->getBasePtr() == St->getBasePtr();
45008 };
45010 bool Commute = false;
45011 switch (Op.getOpcode()) {
45012 default: return false;
45013 case ISD::SIGN_EXTEND:
45014 case ISD::ZERO_EXTEND:
45015 case ISD::ANY_EXTEND:
45016 break;
45017 case ISD::SHL:
45018 case ISD::SRA:
45019 case ISD::SRL: {
45020 SDValue N0 = Op.getOperand(0);
45021 // Look out for (store (shl (load), x)).
45022 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
45023 return false;
45024 break;
45025 }
45026 case ISD::ADD:
45027 case ISD::MUL:
45028 case ISD::AND:
45029 case ISD::OR:
45030 case ISD::XOR:
45031 Commute = true;
45032 LLVM_FALLTHROUGH;
45033 case ISD::SUB: {
45034 SDValue N0 = Op.getOperand(0);
45035 SDValue N1 = Op.getOperand(1);
45036 // Avoid disabling potential load folding opportunities.
45037 if (MayFoldLoad(N1) &&
45038 (!Commute || !isa<ConstantSDNode>(N0) ||
45039 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
45040 return false;
45041 if (MayFoldLoad(N0) &&
45042 ((Commute && !isa<ConstantSDNode>(N1)) ||
45043 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
45044 return false;
45045 if (IsFoldableAtomicRMW(N0, Op) ||
45046 (Commute && IsFoldableAtomicRMW(N1, Op)))
45047 return false;
45048 }
45049 }
45051 PVT = MVT::i32;
45052 return true;
45053 }
45055 bool X86TargetLowering::
45056 isDesirableToCombineBuildVectorToShuffleTruncate(
45057 ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
45059 assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
45060 "Element count mismatch");
45062 Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
45063 "Shuffle Mask expected to be legal");
45065 // For 32-bit elements VPERMD is better than shuffle+truncate.
45066 // TODO: After we improve lowerBuildVector, add execption for VPERMW.
45067 if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
45070 if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
45076 //===----------------------------------------------------------------------===//
45077 // X86 Inline Assembly Support
45078 //===----------------------------------------------------------------------===//
45080 // Helper to match a string separated by whitespace.
45081 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
45082 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
45084 for (StringRef Piece : Pieces) {
45085 if (!S.startswith(Piece)) // Check if the piece matches.
45086 return false;
45088 S = S.substr(Piece.size());
45089 StringRef::size_type Pos = S.find_first_not_of(" \t");
45090 if (Pos == 0) // We matched a prefix.
45091 return false;
45093 S = S.substr(Pos);
45094 }
45096 return S.empty();
45097 }
45099 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
45101 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
45102 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
45103 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
45104 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
45106 if (AsmPieces.size() == 3)
45107 return true;
45108 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
45109 return true;
45110 }
45111 }
45112 return false;
45113 }
45115 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
45116 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
45118 const std::string &AsmStr = IA->getAsmString();
45120 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
45121 if (!Ty || Ty->getBitWidth() % 16 != 0)
45122 return false;
45124 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
45125 SmallVector<StringRef, 4> AsmPieces;
45126 SplitString(AsmStr, AsmPieces, ";\n");
45128 switch (AsmPieces.size()) {
45129 default: return false;
45130 case 1:
45131 // FIXME: this should verify that we are targeting a 486 or better. If not,
45132 // we will turn this bswap into something that will be lowered to logical
45133 // ops instead of emitting the bswap asm. For now, we don't support 486 or
45134 // lower so don't worry about this.
45136 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
45137 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
45138 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
45139 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
45140 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
45141 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
45142 // No need to check constraints, nothing other than the equivalent of
45143 // "=r,0" would be valid here.
45144 return IntrinsicLowering::LowerToByteSwap(CI);
45145 }
45147 // rorw $$8, ${0:w} --> llvm.bswap.i16
45148 if (CI->getType()->isIntegerTy(16) &&
45149 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
45150 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
45151 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
45152 AsmPieces.clear();
45153 StringRef ConstraintsStr = IA->getConstraintString();
45154 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
45155 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
45156 if (clobbersFlagRegisters(AsmPieces))
45157 return IntrinsicLowering::LowerToByteSwap(CI);
45158 }
45159 break;
45160 case 3:
45161 if (CI->getType()->isIntegerTy(32) &&
45162 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
45163 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
45164 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
45165 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
45166 AsmPieces.clear();
45167 StringRef ConstraintsStr = IA->getConstraintString();
45168 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
45169 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
45170 if (clobbersFlagRegisters(AsmPieces))
45171 return IntrinsicLowering::LowerToByteSwap(CI);
45172 }
45174 if (CI->getType()->isIntegerTy(64)) {
45175 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
45176 if (Constraints.size() >= 2 &&
45177 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
45178 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
45179 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
45180 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
45181 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
45182 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
45183 return IntrinsicLowering::LowerToByteSwap(CI);
45184 }
45185 }
45186 break;
45187 }
45188 return false;
45189 }
45191 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
45192 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
45193 .Case("{@cca}", X86::COND_A)
45194 .Case("{@ccae}", X86::COND_AE)
45195 .Case("{@ccb}", X86::COND_B)
45196 .Case("{@ccbe}", X86::COND_BE)
45197 .Case("{@ccc}", X86::COND_B)
45198 .Case("{@cce}", X86::COND_E)
45199 .Case("{@ccz}", X86::COND_E)
45200 .Case("{@ccg}", X86::COND_G)
45201 .Case("{@ccge}", X86::COND_GE)
45202 .Case("{@ccl}", X86::COND_L)
45203 .Case("{@ccle}", X86::COND_LE)
45204 .Case("{@ccna}", X86::COND_BE)
45205 .Case("{@ccnae}", X86::COND_B)
45206 .Case("{@ccnb}", X86::COND_AE)
45207 .Case("{@ccnbe}", X86::COND_A)
45208 .Case("{@ccnc}", X86::COND_AE)
45209 .Case("{@ccne}", X86::COND_NE)
45210 .Case("{@ccnz}", X86::COND_NE)
45211 .Case("{@ccng}", X86::COND_LE)
45212 .Case("{@ccnge}", X86::COND_L)
45213 .Case("{@ccnl}", X86::COND_GE)
45214 .Case("{@ccnle}", X86::COND_G)
45215 .Case("{@ccno}", X86::COND_NO)
45216 .Case("{@ccnp}", X86::COND_P)
45217 .Case("{@ccns}", X86::COND_NS)
45218 .Case("{@cco}", X86::COND_O)
45219 .Case("{@ccp}", X86::COND_P)
45220 .Case("{@ccs}", X86::COND_S)
45221 .Default(X86::COND_INVALID);
45222 return Cond;
45223 }
45225 /// Given a constraint letter, return the type of constraint for this target.
45226 X86TargetLowering::ConstraintType
45227 X86TargetLowering::getConstraintType(StringRef Constraint) const {
45228 if (Constraint.size() == 1) {
45229 switch (Constraint[0]) {
45230 case 'R':
45231 case 'q':
45232 case 'Q':
45233 case 'f':
45234 case 't':
45235 case 'u':
45236 case 'y':
45237 case 'x':
45238 case 'v':
45239 case 'Y':
45240 case 'l':
45241 case 'k': // AVX512 masking registers.
45242 return C_RegisterClass;
45243 case 'a':
45244 case 'b':
45245 case 'c':
45246 case 'd':
45247 case 'S':
45248 case 'D':
45249 case 'A':
45250 return C_Register;
45251 case 'I':
45252 case 'J':
45253 case 'K':
45254 case 'N':
45255 case 'G':
45256 case 'L':
45257 case 'M':
45258 return C_Immediate;
45259 case 'C':
45260 case 'e':
45261 case 'Z':
45262 return C_Other;
45263 default:
45264 break;
45265 }
45266 }
45267 else if (Constraint.size() == 2) {
45268 switch (Constraint[0]) {
45269 default:
45270 break;
45271 case 'Y':
45272 switch (Constraint[1]) {
45273 default:
45274 break;
45275 case 'z':
45276 case '0':
45277 return C_Register;
45278 case 'i':
45279 case 'm':
45280 case 'k':
45281 case 't':
45282 case '2':
45283 return C_RegisterClass;
45284 }
45285 }
45286 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
45287 return C_Other;
45288 return TargetLowering::getConstraintType(Constraint);
45289 }
45291 /// Examine constraint type and operand type and determine a weight value.
45292 /// This object must already have been set up with the operand type
45293 /// and the current alternative constraint selected.
45294 TargetLowering::ConstraintWeight
45295 X86TargetLowering::getSingleConstraintMatchWeight(
45296 AsmOperandInfo &info, const char *constraint) const {
45297 ConstraintWeight weight = CW_Invalid;
45298 Value *CallOperandVal = info.CallOperandVal;
45299 // If we don't have a value, we can't do a match,
45300 // but allow it at the lowest weight.
45301 if (!CallOperandVal)
45302 return CW_Default;
45303 Type *type = CallOperandVal->getType();
45304 // Look at the constraint type.
45305 switch (*constraint) {
45306 default:
45307 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
45308 break;
45309 case 'R':
45310 case 'q':
45311 case 'Q':
45312 case 'a':
45313 case 'b':
45314 case 'c':
45315 case 'd':
45316 case 'S':
45317 case 'D':
45318 case 'A':
45319 if (CallOperandVal->getType()->isIntegerTy())
45320 weight = CW_SpecificReg;
45321 break;
45322 case 'f':
45323 case 't':
45324 case 'u':
45325 if (type->isFloatingPointTy())
45326 weight = CW_SpecificReg;
45327 break;
45328 case 'y':
45329 if (type->isX86_MMXTy() && Subtarget.hasMMX())
45330 weight = CW_SpecificReg;
45331 break;
45332 case 'Y': {
45333 unsigned Size = StringRef(constraint).size();
45334 // Pick 'i' as the next char as 'Yi' and 'Y' are synonymous when matching 'Y'.
45335 char NextChar = Size == 2 ? constraint[1] : 'i';
45336 if (Size > 2)
45337 break;
45338 switch (NextChar) {
45339 default:
45340 weight = CW_Invalid;
45341 break;
45342 case 'z':
45343 case '0':
45344 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
45345 return CW_SpecificReg;
45346 break;
45347 // Conditional OpMask regs (AVX512)
45348 case 'k':
45349 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
45350 return CW_Register;
45351 break;
45352 // Any MMX reg
45353 case 'm':
45354 if (type->isX86_MMXTy() && Subtarget.hasMMX())
45355 return weight;
45356 break;
45357 // Any SSE reg when ISA >= SSE2, same as 'Y'
45358 case 'i':
45359 case 't':
45360 case '2':
45361 if (!Subtarget.hasSSE2())
45362 return CW_Invalid;
45363 break;
45364 }
45365 // Fall through (handle "Y" constraint).
45366 LLVM_FALLTHROUGH;
45367 }
45368 case 'v':
45369 if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
45370 weight = CW_Register;
45371 LLVM_FALLTHROUGH;
45372 case 'x':
45373 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
45374 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
45375 weight = CW_Register;
45376 break;
45377 case 'k':
45378 // Enable conditional vector operations using %k<#> registers.
45379 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
45380 weight = CW_Register;
45381 break;
45382 case 'I':
45383 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
45384 if (C->getZExtValue() <= 31)
45385 weight = CW_Constant;
45386 }
45387 break;
45388 case 'J':
45389 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45390 if (C->getZExtValue() <= 63)
45391 weight = CW_Constant;
45392 }
45393 break;
45394 case 'K':
45395 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45396 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
45397 weight = CW_Constant;
45398 }
45399 break;
45400 case 'L':
45401 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45402 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
45403 weight = CW_Constant;
45404 }
45405 break;
45406 case 'M':
45407 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45408 if (C->getZExtValue() <= 3)
45409 weight = CW_Constant;
45410 }
45411 break;
45412 case 'N':
45413 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45414 if (C->getZExtValue() <= 0xff)
45415 weight = CW_Constant;
45416 }
45417 break;
45418 case 'G':
45419 case 'C':
45420 if (isa<ConstantFP>(CallOperandVal)) {
45421 weight = CW_Constant;
45422 }
45423 break;
45424 case 'e':
45425 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45426 if ((C->getSExtValue() >= -0x80000000LL) &&
45427 (C->getSExtValue() <= 0x7fffffffLL))
45428 weight = CW_Constant;
45429 }
45430 break;
45431 case 'Z':
45432 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45433 if (C->getZExtValue() <= 0xffffffff)
45434 weight = CW_Constant;
45435 }
45436 break;
45437 }
45438 return weight;
45439 }
45441 /// Try to replace an X constraint, which matches anything, with another that
45442 /// has more specific requirements based on the type of the corresponding
45443 /// operand.
45444 const char *X86TargetLowering::
45445 LowerXConstraint(EVT ConstraintVT) const {
45446 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
45447 // 'f' like normal targets.
45448 if (ConstraintVT.isFloatingPoint()) {
45449 if (Subtarget.hasSSE2())
45450 return "Y";
45451 if (Subtarget.hasSSE1())
45452 return "x";
45453 }
45455 return TargetLowering::LowerXConstraint(ConstraintVT);
45456 }
45458 // Lower @cc targets via setcc.
45459 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
45460 SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
45461 SelectionDAG &DAG) const {
45462 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
45463 if (Cond == X86::COND_INVALID)
45464 return SDValue();
45465 // Check that return type is valid.
45466 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
45467 OpInfo.ConstraintVT.getSizeInBits() < 8)
45468 report_fatal_error("Flag output operand is of invalid type");
45470 // Get EFLAGS register. Only update chain when copyfrom is glued.
45471 if (Flag.getNode()) {
45472 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
45473 Chain = Flag.getValue(1);
45474 } else
45475 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
45476 // Extract CC code.
45477 SDValue CC = getSETCC(Cond, Flag, DL, DAG);
45478 // Extend to 32-bits
45479 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
45481 return Result;
45482 }
45484 /// Lower the specified operand into the Ops vector.
45485 /// If it is invalid, don't add anything to Ops.
45486 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
45487 std::string &Constraint,
45488 std::vector<SDValue>&Ops,
45489 SelectionDAG &DAG) const {
45490 SDValue Result;
45492 // Only support length 1 constraints for now.
45493 if (Constraint.length() > 1) return;
45495 char ConstraintLetter = Constraint[0];
45496 switch (ConstraintLetter) {
45497 default: break;
45498 case 'I':
45499 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45500 if (C->getZExtValue() <= 31) {
45501 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45502 Op.getValueType());
45503 break;
45504 }
45505 }
45506 return;
45507 case 'J':
45508 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45509 if (C->getZExtValue() <= 63) {
45510 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45511 Op.getValueType());
45512 break;
45513 }
45514 }
45515 return;
45516 case 'K':
45517 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45518 if (isInt<8>(C->getSExtValue())) {
45519 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45520 Op.getValueType());
45521 break;
45522 }
45523 }
45524 return;
45525 case 'L':
45526 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45527 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
45528 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
45529 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
45530 Op.getValueType());
45531 break;
45532 }
45533 }
45534 return;
45535 case 'M':
45536 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45537 if (C->getZExtValue() <= 3) {
45538 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45539 Op.getValueType());
45540 break;
45541 }
45542 }
45543 return;
45544 case 'N':
45545 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45546 if (C->getZExtValue() <= 255) {
45547 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45548 Op.getValueType());
45549 break;
45550 }
45551 }
45552 return;
45553 case 'O':
45554 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45555 if (C->getZExtValue() <= 127) {
45556 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45557 Op.getValueType());
45558 break;
45559 }
45560 }
45561 return;
45562 case 'e': {
45563 // 32-bit signed value
45564 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45565 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
45566 C->getSExtValue())) {
45567 // Widen to 64 bits here to get it sign extended.
45568 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
45569 break;
45570 }
45571 // FIXME gcc accepts some relocatable values here too, but only in certain
45572 // memory models; it's complicated.
45573 return;
45574 }
45576 case 'Z': {
45577 // 32-bit unsigned value
45578 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45579 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
45580 C->getZExtValue())) {
45581 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45582 Op.getValueType());
45583 break;
45584 }
45585 }
45586 // FIXME gcc accepts some relocatable values here too, but only in certain
45587 // memory models; it's complicated.
45588 return;
45589 }
45590 case 'i': {
45591 // Literal immediates are always ok.
45592 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
45593 bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
45594 BooleanContent BCont = getBooleanContents(MVT::i64);
45595 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
45596 : ISD::SIGN_EXTEND;
45597 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
45598 : CST->getSExtValue();
45599 Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
45600 break;
45601 }
45603 // In any sort of PIC mode addresses need to be computed at runtime by
45604 // adding in a register or some sort of table lookup. These can't
45605 // be used as immediates.
45606 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
45607 return;
45609 // If we are in non-pic codegen mode, we allow the address of a global (with
45610 // an optional displacement) to be used with 'i'.
45611 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
45612 // If we require an extra load to get this address, as in PIC mode, we
45613 // can't accept it.
45614 if (isGlobalStubReference(
45615 Subtarget.classifyGlobalReference(GA->getGlobal())))
45616 return;
45617 break;
45618 }
45619 }
45621 if (Result.getNode()) {
45622 Ops.push_back(Result);
45623 return;
45624 }
45625 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
45626 }
45628 /// Check if \p RC is a general purpose register class.
45629 /// I.e., GR* or one of their variant.
45630 static bool isGRClass(const TargetRegisterClass &RC) {
45631 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
45632 RC.hasSuperClassEq(&X86::GR16RegClass) ||
45633 RC.hasSuperClassEq(&X86::GR32RegClass) ||
45634 RC.hasSuperClassEq(&X86::GR64RegClass) ||
45635 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
45636 }
45638 /// Check if \p RC is a vector register class.
45639 /// I.e., FR* / VR* or one of their variant.
45640 static bool isFRClass(const TargetRegisterClass &RC) {
45641 return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
45642 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
45643 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
45644 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
45645 RC.hasSuperClassEq(&X86::VR512RegClass);
45646 }
45648 /// Check if \p RC is a mask register class.
45649 /// I.e., VK* or one of their variant.
45650 static bool isVKClass(const TargetRegisterClass &RC) {
45651 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
45652 RC.hasSuperClassEq(&X86::VK2RegClass) ||
45653 RC.hasSuperClassEq(&X86::VK4RegClass) ||
45654 RC.hasSuperClassEq(&X86::VK8RegClass) ||
45655 RC.hasSuperClassEq(&X86::VK16RegClass) ||
45656 RC.hasSuperClassEq(&X86::VK32RegClass) ||
45657 RC.hasSuperClassEq(&X86::VK64RegClass);
45658 }
45660 std::pair<unsigned, const TargetRegisterClass *>
45661 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
45662 StringRef Constraint,
45663 MVT VT) const {
45664 // First, see if this is a constraint that directly corresponds to an LLVM
45665 // register class.
45666 if (Constraint.size() == 1) {
45667 // GCC Constraint Letters
45668 switch (Constraint[0]) {
45669 default: break;
45670 // 'A' means [ER]AX + [ER]DX.
45671 case 'A':
45672 if (Subtarget.is64Bit())
45673 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
45674 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
45675 "Expecting 64, 32 or 16 bit subtarget");
45676 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45678 // TODO: Slight differences here in allocation order and leaving
45679 // RIP in the class. Do they matter any more here than they do
45680 // in the normal allocation?
45681 case 'k':
45682 if (Subtarget.hasAVX512()) {
45683 if (VT == MVT::i1)
45684 return std::make_pair(0U, &X86::VK1RegClass);
45685 if (VT == MVT::i8)
45686 return std::make_pair(0U, &X86::VK8RegClass);
45687 if (VT == MVT::i16)
45688 return std::make_pair(0U, &X86::VK16RegClass);
45689 }
45690 if (Subtarget.hasBWI()) {
45691 if (VT == MVT::i32)
45692 return std::make_pair(0U, &X86::VK32RegClass);
45693 if (VT == MVT::i64)
45694 return std::make_pair(0U, &X86::VK64RegClass);
45695 }
45696 break;
45697 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
45698 if (Subtarget.is64Bit()) {
45699 if (VT == MVT::i32 || VT == MVT::f32)
45700 return std::make_pair(0U, &X86::GR32RegClass);
45701 if (VT == MVT::i16)
45702 return std::make_pair(0U, &X86::GR16RegClass);
45703 if (VT == MVT::i8 || VT == MVT::i1)
45704 return std::make_pair(0U, &X86::GR8RegClass);
45705 if (VT == MVT::i64 || VT == MVT::f64)
45706 return std::make_pair(0U, &X86::GR64RegClass);
45707 break;
45708 }
45709 LLVM_FALLTHROUGH;
45710 // 32-bit fallthrough
45711 case 'Q': // Q_REGS
45712 if (VT == MVT::i32 || VT == MVT::f32)
45713 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
45714 if (VT == MVT::i16)
45715 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
45716 if (VT == MVT::i8 || VT == MVT::i1)
45717 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
45718 if (VT == MVT::i64)
45719 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
45720 break;
45721 case 'r': // GENERAL_REGS
45722 case 'l': // INDEX_REGS
45723 if (VT == MVT::i8 || VT == MVT::i1)
45724 return std::make_pair(0U, &X86::GR8RegClass);
45725 if (VT == MVT::i16)
45726 return std::make_pair(0U, &X86::GR16RegClass);
45727 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
45728 return std::make_pair(0U, &X86::GR32RegClass);
45729 return std::make_pair(0U, &X86::GR64RegClass);
45730 case 'R': // LEGACY_REGS
45731 if (VT == MVT::i8 || VT == MVT::i1)
45732 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
45733 if (VT == MVT::i16)
45734 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
45735 if (VT == MVT::i32 || !Subtarget.is64Bit())
45736 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
45737 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
45738 case 'f': // FP Stack registers.
45739 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
45740 // value to the correct fpstack register class.
45741 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
45742 return std::make_pair(0U, &X86::RFP32RegClass);
45743 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
45744 return std::make_pair(0U, &X86::RFP64RegClass);
45745 return std::make_pair(0U, &X86::RFP80RegClass);
45746 case 'y': // MMX_REGS if MMX allowed.
45747 if (!Subtarget.hasMMX()) break;
45748 return std::make_pair(0U, &X86::VR64RegClass);
45749 case 'Y': // SSE_REGS if SSE2 allowed
45750 if (!Subtarget.hasSSE2()) break;
45751 LLVM_FALLTHROUGH;
45752 case 'v':
45753 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
45754 if (!Subtarget.hasSSE1()) break;
45755 bool VConstraint = (Constraint[0] == 'v');
45757 switch (VT.SimpleTy) {
45758 default: break;
45759 // Scalar SSE types.
45760 case MVT::f32:
45761 case MVT::i32:
45762 if (VConstraint && Subtarget.hasVLX())
45763 return std::make_pair(0U, &X86::FR32XRegClass);
45764 return std::make_pair(0U, &X86::FR32RegClass);
45765 case MVT::f64:
45766 case MVT::i64:
45767 if (VConstraint && Subtarget.hasVLX())
45768 return std::make_pair(0U, &X86::FR64XRegClass);
45769 return std::make_pair(0U, &X86::FR64RegClass);
45770 // TODO: Handle i128 in FR128RegClass after it is tested well.
45771 // Vector types and fp128.
45772 case MVT::f128:
45773 case MVT::v16i8:
45774 case MVT::v8i16:
45775 case MVT::v4i32:
45776 case MVT::v2i64:
45777 case MVT::v4f32:
45778 case MVT::v2f64:
45779 if (VConstraint && Subtarget.hasVLX())
45780 return std::make_pair(0U, &X86::VR128XRegClass);
45781 return std::make_pair(0U, &X86::VR128RegClass);
45782 // AVX types.
45783 case MVT::v32i8:
45784 case MVT::v16i16:
45785 case MVT::v8i32:
45786 case MVT::v4i64:
45787 case MVT::v8f32:
45788 case MVT::v4f64:
45789 if (VConstraint && Subtarget.hasVLX())
45790 return std::make_pair(0U, &X86::VR256XRegClass);
45791 if (Subtarget.hasAVX())
45792 return std::make_pair(0U, &X86::VR256RegClass);
45793 break;
45794 case MVT::v8f64:
45795 case MVT::v16f32:
45796 case MVT::v16i32:
45797 case MVT::v8i64:
45798 if (!Subtarget.hasAVX512()) break;
45799 if (VConstraint)
45800 return std::make_pair(0U, &X86::VR512RegClass);
45801 return std::make_pair(0U, &X86::VR512_0_15RegClass);
45802 }
45803 break;
45804 }
45805 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
45806 switch (Constraint[1]) {
45807 default:
45808 break;
45809 case 'i':
45810 case 't':
45811 case '2':
45812 return getRegForInlineAsmConstraint(TRI, "Y", VT);
45814 if (!Subtarget.hasMMX()) break;
45815 return std::make_pair(0U, &X86::VR64RegClass);
45816 case 'z':
45817 case '0':
45818 if (!Subtarget.hasSSE1()) break;
45819 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
45820 case 'k':
45821 // This register class doesn't allocate k0 for masked vector operations.
45822 if (Subtarget.hasAVX512()) {
45823 if (VT == MVT::i1)
45824 return std::make_pair(0U, &X86::VK1WMRegClass);
45825 if (VT == MVT::i8)
45826 return std::make_pair(0U, &X86::VK8WMRegClass);
45827 if (VT == MVT::i16)
45828 return std::make_pair(0U, &X86::VK16WMRegClass);
45829 }
45830 if (Subtarget.hasBWI()) {
45831 if (VT == MVT::i32)
45832 return std::make_pair(0U, &X86::VK32WMRegClass);
45833 if (VT == MVT::i64)
45834 return std::make_pair(0U, &X86::VK64WMRegClass);
45835 }
45836 break;
45837 }
45838 }
45840 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
45841 return std::make_pair(0U, &X86::GR32RegClass);
45843 // Use the default implementation in TargetLowering to convert the register
45844 // constraint into a member of a register class.
45845 std::pair<unsigned, const TargetRegisterClass*> Res;
45846 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
45848 // Not found as a standard register?
45849 if (!Res.second) {
45850 // Map st(0) -> st(7) -> ST0
45851 if (Constraint.size() == 7 && Constraint[0] == '{' &&
45852 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
45853 Constraint[3] == '(' &&
45854 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
45855 Constraint[5] == ')' && Constraint[6] == '}') {
45856 // st(7) is not allocatable and thus not a member of RFP80. Return
45857 // singleton class in cases where we have a reference to it.
45858 if (Constraint[4] == '7')
45859 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
45860 return std::make_pair(X86::FP0 + Constraint[4] - '0',
45861 &X86::RFP80RegClass);
45862 }
45864 // GCC allows "st(0)" to be called just plain "st".
45865 if (StringRef("{st}").equals_lower(Constraint))
45866 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
45868 // flags -> EFLAGS
45869 if (StringRef("{flags}").equals_lower(Constraint))
45870 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
45872 // dirflag -> DF
45873 if (StringRef("{dirflag}").equals_lower(Constraint))
45874 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
45876 // fpsr -> FPSW
45877 if (StringRef("{fpsr}").equals_lower(Constraint))
45878 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
45880 return Res;
45881 }
45883 // Make sure it isn't a register that requires 64-bit mode.
45884 if (!Subtarget.is64Bit() &&
45885 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
45886 TRI->getEncodingValue(Res.first) >= 8) {
45887 // Register requires REX prefix, but we're in 32-bit mode.
45888 return std::make_pair(0, nullptr);
45889 }
45891 // Make sure it isn't a register that requires AVX512.
45892 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
45893 TRI->getEncodingValue(Res.first) & 0x10) {
45894 // Register requires EVEX prefix.
45895 return std::make_pair(0, nullptr);
45896 }
45898 // Otherwise, check to see if this is a register class of the wrong value
45899 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
45900 // turn into {ax},{dx}.
45901 // MVT::Other is used to specify clobber names.
45902 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
45903 return Res; // Correct type already, nothing to do.
45905 // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should
45906 // return "eax". This should even work for things like getting 64-bit integer
45907 // registers when given an f64 type.
45908 const TargetRegisterClass *Class = Res.second;
45909 // The generic code will match the first register class that contains the
45910 // given register. Thus, based on the ordering of the tablegened file,
45911 // the "plain" GR classes might not come first.
45912 // Therefore, use a helper method.
45913 if (isGRClass(*Class)) {
45914 unsigned Size = VT.getSizeInBits();
45915 if (Size == 1) Size = 8;
45916 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
45917 if (DestReg > 0) {
45918 bool is64Bit = Subtarget.is64Bit();
45919 const TargetRegisterClass *RC =
45920 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
45921 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
45922 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
45923 : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
45924 : nullptr;
45925 if (Size == 64 && !is64Bit) {
45926 // Model GCC's behavior here and select a fixed pair of 32-bit
45927 // registers.
45928 switch (DestReg) {
45929 case X86::RAX:
45930 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45931 case X86::RDX:
45932 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
45933 case X86::RCX:
45934 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
45935 case X86::RBX:
45936 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
45937 case X86::RSI:
45938 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
45939 case X86::RDI:
45940 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
45941 case X86::RBP:
45942 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
45943 default:
45944 return std::make_pair(0, nullptr);
45945 }
45947 if (RC && RC->contains(DestReg))
45948 return std::make_pair(DestReg, RC);
45949 }
45951 // No register found/type mismatch.
45952 return std::make_pair(0, nullptr);
45953 } else if (isFRClass(*Class)) {
45954 // Handle references to XMM physical registers that got mapped into the
45955 // wrong class. This can happen with constraints like {xmm0} where the
45956 // target independent register mapper will just pick the first match it can
45957 // find, ignoring the required type.
45959 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
45960 if (VT == MVT::f32 || VT == MVT::i32)
45961 Res.second = &X86::FR32XRegClass;
45962 else if (VT == MVT::f64 || VT == MVT::i64)
45963 Res.second = &X86::FR64XRegClass;
45964 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
45965 Res.second = &X86::VR128XRegClass;
45966 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
45967 Res.second = &X86::VR256XRegClass;
45968 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
45969 Res.second = &X86::VR512RegClass;
45970 else {
45971 // Type mismatch and not a clobber: Return an error;
45972 Res.first = 0;
45973 Res.second = nullptr;
45974 }
45975 } else if (isVKClass(*Class)) {
45976 if (VT == MVT::i1)
45977 Res.second = &X86::VK1RegClass;
45978 else if (VT == MVT::i8)
45979 Res.second = &X86::VK8RegClass;
45980 else if (VT == MVT::i16)
45981 Res.second = &X86::VK16RegClass;
45982 else if (VT == MVT::i32)
45983 Res.second = &X86::VK32RegClass;
45984 else if (VT == MVT::i64)
45985 Res.second = &X86::VK64RegClass;
45986 else {
45987 // Type mismatch and not a clobber: Return an error;
45988 Res.first = 0;
45989 Res.second = nullptr;
45990 }
45991 }
45993 return Res;
45994 }
45996 int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
45997 const AddrMode &AM, Type *Ty,
45998 unsigned AS) const {
45999 // Scaling factors are not free at all.
46000 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
46001 // will take 2 allocations in the out of order engine instead of 1
46002 // for plain addressing mode, i.e. inst (reg1).
46003 // E.g.,
46004 // vaddps (%rsi,%rdx), %ymm0, %ymm1
46005 // Requires two allocations (one for the load, one for the computation)
46006 // whereas:
46007 // vaddps (%rsi), %ymm0, %ymm1
46008 // Requires just 1 allocation, i.e., freeing allocations for other operations
46009 // and having less micro operations to execute.
46011 // For some X86 architectures, this is even worse because for instance for
46012 // stores, the complex addressing mode forces the instruction to use the
46013 // "load" ports instead of the dedicated "store" port.
46014 // E.g., on Haswell:
46015 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
46016 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
46017 if (isLegalAddressingMode(DL, AM, Ty, AS))
46018 // Scale represents reg2 * scale, thus account for 1
46019 // as soon as we use a second register.
46020 return AM.Scale != 0;
46021 return -1;
46022 }
46024 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
46025 // Integer division on x86 is expensive. However, when aggressively optimizing
46026 // for code size, we prefer to use a div instruction, as it is usually smaller
46027 // than the alternative sequence.
46028 // The exception to this is vector division. Since x86 doesn't have vector
46029 // integer division, leaving the division as-is is a loss even in terms of
46030 // size, because it will have to be scalarized, while the alternative code
46031 // sequence can be performed in vector form.
46032 bool OptSize =
46033 Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
46034 return OptSize && !VT.isVector();
46035 }
46037 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
46038 if (!Subtarget.is64Bit())
46039 return;
46041 // Update IsSplitCSR in X86MachineFunctionInfo.
46042 X86MachineFunctionInfo *AFI =
46043 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
46044 AFI->setIsSplitCSR(true);
46045 }
46047 void X86TargetLowering::insertCopiesSplitCSR(
46048 MachineBasicBlock *Entry,
46049 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
46050 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
46051 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
46052 if (!IStart)
46053 return;
46055 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
46056 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
46057 MachineBasicBlock::iterator MBBI = Entry->begin();
46058 for (const MCPhysReg *I = IStart; *I; ++I) {
46059 const TargetRegisterClass *RC = nullptr;
46060 if (X86::GR64RegClass.contains(*I))
46061 RC = &X86::GR64RegClass;
46062 else
46063 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
46065 Register NewVR = MRI->createVirtualRegister(RC);
46066 // Create copy from CSR to a virtual register.
46067 // FIXME: this currently does not emit CFI pseudo-instructions, it works
46068 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
46069 // nounwind. If we want to generalize this later, we may need to emit
46070 // CFI pseudo-instructions.
46071 assert(
46072 Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
46073 "Function should be nounwind in insertCopiesSplitCSR!");
46074 Entry->addLiveIn(*I);
46075 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
46076 .addReg(*I);
46078 // Insert the copy-back instructions right before the terminator.
46079 for (auto *Exit : Exits)
46080 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
46081 TII->get(TargetOpcode::COPY), *I)
46082 .addReg(NewVR);
46083 }
46084 }
46086 bool X86TargetLowering::supportSwiftError() const {
46087 return Subtarget.is64Bit();
46088 }
46090 /// Returns the name of the symbol used to emit stack probes or the empty
46091 /// string if not applicable.
46092 StringRef
46093 X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
46094 // If the function specifically requests stack probes, emit them.
46095 if (MF.getFunction().hasFnAttribute("probe-stack"))
46096 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
46098 // Generally, if we aren't on Windows, the platform ABI does not include
46099 // support for stack probes, so don't emit them.
46100 if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
46101 MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
46104 // We need a stack probe to conform to the Windows ABI. Choose the right
46105 // symbol.
46106 if (Subtarget.is64Bit())
46107 return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
46108 return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
46109 }
46111 unsigned
46112 X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
46113 // The default stack probe size is 4096 if the function has no stackprobesize
46114 // attribute.
46115 unsigned StackProbeSize = 4096;
46116 const Function &Fn = MF.getFunction();
46117 if (Fn.hasFnAttribute("stack-probe-size"))
46118 Fn.getFnAttribute("stack-probe-size")
46119 .getValueAsString()
46120 .getAsInteger(0, StackProbeSize);
46121 return StackProbeSize;
46122 }